diff --git a/src/mzscheme/gc2/newgc.c b/src/mzscheme/gc2/newgc.c index 82c36d0436..40787a55db 100644 --- a/src/mzscheme/gc2/newgc.c +++ b/src/mzscheme/gc2/newgc.c @@ -198,8 +198,8 @@ inline static void free_used_pages(size_t len) /*****************************************************************************/ #ifdef NEWGC_MEMORY_TRACE # error "memory tracing not implemented in this particular revision \ - please revert to early versions of this collector, and then nag \ - Adam (awick@cs.utah.edu) to put this stuff back in" + please revert to early versions of this collector, and then nag \ +Adam (awick@cs.utah.edu) to put this stuff back in" #endif int GC_mtrace_new_id(void *f) @@ -263,8 +263,8 @@ inline static struct mpage *pagemap_find_page(void *p) { map itself serves two important purposes: Between collections, it maps pointers to write-protected pages, so that - the write-barrier can identify what page a write has happened to and - mark it as potentially containing pointers from gen 1 to gen 0. + the write-barrier can identify what page a write has happened to and + mark it as potentially containing pointers from gen 1 to gen 0. During collections, it maps pointers to "from" pages. */ @@ -353,7 +353,7 @@ int GC_is_allocated(void *p) GC->gen0.curr_alloc_page is the member of this list we are currently allocating on. The size count helps us trigger collection quickly when we're running out of space; see the test in allocate_big. -*/ + */ THREAD_LOCAL unsigned long GC_gen0_alloc_page_ptr = 0; THREAD_LOCAL unsigned long GC_gen0_alloc_page_end = 0; @@ -576,7 +576,7 @@ inline static void *fast_malloc_one_small_tagged(size_t sizeb, int dirty) bzero(retval, sizeb); ((struct objhead *)retval)->size = (sizeb >> gcLOG_WORD_SIZE); - + return PTR(NUM(retval) + WORD_SIZE); } } @@ -607,7 +607,7 @@ long GC_initial_word(int sizeb) struct objhead info; sizeb = ALIGN_BYTES_SIZE(gcWORDS_TO_BYTES(gcBYTES_TO_WORDS(sizeb)) + WORD_SIZE); - + memset(&info, 0, sizeof(struct objhead)); info.size = (sizeb >> gcLOG_WORD_SIZE); memcpy(&w, &info, sizeof(struct objhead)); @@ -662,11 +662,7 @@ inline static void resize_gen0(unsigned long new_size) /* remove the excess pages */ while(work) { struct mpage *next = work->next; - work->big_page = 1; - work->size = GEN0_PAGE_SIZE; - pagemap_remove(work); - free_pages(work->addr, GEN0_PAGE_SIZE); - free_mpage(work); + gen0_free_mpage(work); work = next; } } @@ -716,14 +712,14 @@ static int collections = 0; static void init_debug_file(void) { /* - char filename_buf[20]; - snprintf(filename_buf, 20, "gclog%d%d", (collections / 10), (collections % 10)); - dump = fopen(filename_buf, "a"); - collections += 1; - */ + char filename_buf[20]; + snprintf(filename_buf, 20, "gclog%d%d", (collections / 10), (collections % 10)); + dump = fopen(filename_buf, "a"); + collections += 1; + */ char *filename = malloc(8 * sizeof(char)); - + filename[0] = 'g'; filename[1] = 'c'; filename[2] = 'l'; filename[3] = 'o'; filename[4] = 'g'; filename[5] = '0' + (collections / 10); @@ -743,9 +739,9 @@ static void dump_region(void **start, void **end) { while(start < end) { fprintf(dump, "%.8lx: %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx\n", - NUM(start), NUM(*start), NUM(*(start + 1)), NUM(*(start + 2)), - NUM(*(start + 3)), NUM(*(start + 4)), NUM(*(start + 5)), - NUM(*(start + 6)), NUM(*(start + 7))); + NUM(start), NUM(*start), NUM(*(start + 1)), NUM(*(start + 2)), + NUM(*(start + 3)), NUM(*(start + 4)), NUM(*(start + 5)), + NUM(*(start + 6)), NUM(*(start + 7))); start += 8; } fprintf(dump, 
"\n\n"); @@ -827,7 +823,7 @@ static void set_backtrace_source(void *source, int type) } static void record_backtrace(struct mpage *page, void *ptr) -/* ptr is after objhead */ + /* ptr is after objhead */ { unsigned long delta; @@ -837,7 +833,7 @@ static void record_backtrace(struct mpage *page, void *ptr) } static void copy_backtrace_source(struct mpage *to_page, void *to_ptr, - struct mpage *from_page, void *from_ptr) + struct mpage *from_page, void *from_ptr) /* ptrs are at objhead */ { unsigned long to_delta, from_delta; @@ -850,7 +846,7 @@ static void copy_backtrace_source(struct mpage *to_page, void *to_ptr, } static void *get_backtrace(struct mpage *page, void *ptr) -/* ptr is after objhead */ + /* ptr is after objhead */ { unsigned long delta; @@ -936,19 +932,19 @@ static inline void *get_stack_base() { #include "roots.c" #define traverse_roots(gcMUCK, set_bt_src) { \ - unsigned long j; \ - if(roots->roots) { \ - sort_and_merge_roots(); \ - for(j = 0; j < roots->count; j += 2) { \ - void **start = (void**)roots->roots[j]; \ - void **end = (void**)roots->roots[j+1]; \ - while(start < end) { \ - set_bt_src(start, BT_ROOT); \ - gcMUCK(*start++); \ - } \ - } \ - } \ - } + unsigned long j; \ + if(roots->roots) { \ + sort_and_merge_roots(); \ + for(j = 0; j < roots->count; j += 2) { \ + void **start = (void**)roots->roots[j]; \ + void **end = (void**)roots->roots[j+1]; \ + while(start < end) { \ + set_bt_src(start, BT_ROOT); \ + gcMUCK(*start++); \ + } \ + } \ + } \ +} inline static void mark_roots() { @@ -1022,8 +1018,8 @@ inline static void check_finalizers(int level) struct finalizer *next = GC_resolve(work->next); GCDEBUG((DEBUGOUTF, - "CFNL: Level %i finalizer %p on %p queued for finalization.\n", - work->eager_level, work, work->p)); + "CFNL: Level %i finalizer %p on %p queued for finalization.\n", + work->eager_level, work, work->p)); set_backtrace_source(work, BT_FINALIZER); gcMARK(work->p); if(prev) prev->next = next; @@ -1036,8 +1032,8 @@ inline static void check_finalizers(int level) work = next; } else { GCDEBUG((DEBUGOUTF, "CFNL: Not finalizing %p (level %i on %p): %p / %i\n", - work, work->eager_level, work->p, pagemap_find_page(work->p), - marked(work->p))); + work, work->eager_level, work->p, pagemap_find_page(work->p), + marked(work->p))); prev = work; work = GC_resolve(work->next); } @@ -1052,8 +1048,8 @@ inline static void do_ordered_level3(void) for(temp = GC_resolve(finalizers); temp; temp = GC_resolve(temp->next)) if(!marked(temp->p)) { GCDEBUG((DEBUGOUTF, - "LVL3: %p is not marked. Marking payload (%p)\n", - temp, temp->p)); + "LVL3: %p is not marked. Marking payload (%p)\n", + temp, temp->p)); set_backtrace_source(temp, BT_FINALIZER); if(temp->tagged) mark_table[*(unsigned short*)temp->p](temp->p); if(!temp->tagged) GC_mark_xtagged(temp->p); @@ -1371,7 +1367,7 @@ void GC_init_type_tags(int count, int pair, int mutable_pair, int weakbox, int e GC = malloc(sizeof(NewGC)); NewGC_initialize(GC); - + GC->weak_box_tag = weakbox; GC->ephemeron_tag = ephemeron; GC->weak_array_tag = weakarray; @@ -1386,9 +1382,9 @@ void GC_init_type_tags(int count, int pair, int mutable_pair, int weakbox, int e malloc()/free()ed memory, etc., and there's also the administrative structures for the GC itself. 
*/ GC->max_pages_for_use = GC->max_pages_in_heap / 2; - + resize_gen0(GEN0_INITIAL_SIZE); - + GC_register_traversers(GC->weak_box_tag, size_weak_box, mark_weak_box, fixup_weak_box, 0, 0); GC_register_traversers(GC->ephemeron_tag, size_ephemeron, mark_ephemeron, fixup_ephemeron, 0, 0); GC_register_traversers(GC->weak_array_tag, size_weak_array, mark_weak_array, fixup_weak_array, 0, 0); @@ -1410,7 +1406,7 @@ void GC_gcollect(void) } void GC_register_traversers(short tag, Size_Proc size, Mark_Proc mark, - Fixup_Proc fixup, int constant_Size, int atomic) + Fixup_Proc fixup, int constant_Size, int atomic) { GC->mark_table[tag] = atomic ? (Mark_Proc)PAGE_ATOMIC : mark; GC->fixup_table[tag] = fixup; @@ -1460,138 +1456,140 @@ void GC_mark(const void *const_p) if(page->big_page) { /* This is a bigpage. The first thing we do is see if its been marked - previously */ + previously */ if(page->big_page == 1) { - /* in this case, it has not. So we want to mark it, first off. */ - page->big_page = 2; + /* in this case, it has not. So we want to mark it, first off. */ + page->big_page = 2; - /* if this is in the nursery, we want to move it out of the nursery */ - if(!page->generation) { - page->generation = 1; - if(page->prev) page->prev->next = page->next; else - GC->gen0.big_pages = page->next; - if(page->next) page->next->prev = page->prev; + /* if this is in the nursery, we want to move it out of the nursery */ + if(!page->generation) { + page->generation = 1; - backtrace_new_page(page); + /* remove page */ + if(page->prev) page->prev->next = page->next; else + GC->gen0.big_pages = page->next; + if(page->next) page->next->prev = page->prev; - /* add to gen1 */ - page->next = GC->gen1_pages[PAGE_BIG]; - page->prev = NULL; - if(page->next) page->next->prev = page; - GC->gen1_pages[PAGE_BIG] = page; - /* if we're doing memory accounting, then we need to make sure the - btc_mark is right */ - set_btc_mark(NUM(page->addr) + PREFIX_SIZE); - } + backtrace_new_page(page); - page->marked_on = 1; - record_backtrace(page, PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE)); - GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page)); - /* Finally, we want to add this to our mark queue, so we can - propagate its pointers */ - push_ptr(p); + /* add to gen1 */ + page->next = GC->gen1_pages[PAGE_BIG]; + page->prev = NULL; + if(page->next) page->next->prev = page; + GC->gen1_pages[PAGE_BIG] = page; + /* if we're doing memory accounting, then we need to make sure the + btc_mark is right */ + set_btc_mark(NUM(page->addr) + PREFIX_SIZE); + } + + page->marked_on = 1; + record_backtrace(page, PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE)); + GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page)); + /* Finally, we want to add this to our mark queue, so we can + propagate its pointers */ + push_ptr(p); } else GCDEBUG((DEBUGOUTF, "Not marking %p on big %p (already marked)\n", - p, page)); + p, page)); } else { struct objhead *ohead = (struct objhead *)(NUM(p) - WORD_SIZE); if(!ohead->mark) { - /* what we do next depends on whether this is a gen0 or gen1 - object */ - if(page->generation) { - /* this is a generation 1 object. This means we are not going - to move it, we don't have to check to see if it's an atomic - object masquerading as a tagged object, etc. 
So all we do - is add the pointer to the mark queue and note on the page - that we marked something on it*/ - if((NUM(page->addr) + page->previous_size) <= NUM(p)) { - GCDEBUG((DEBUGOUTF, "Marking %p (leaving alone)\n", p)); - ohead->mark = 1; - page->marked_on = 1; - page->previous_size = PREFIX_SIZE; - page->live_size += ohead->size; - record_backtrace(page, p); - push_ptr(p); - } else GCDEBUG((DEBUGOUTF, "Not marking %p (it's old; %p / %i)\n", - p, page, page->previous_size)); - } else { - /* this is a generation 0 object. This means that we do have - to do all of the above. Fun, fun, fun. */ - unsigned short type = ohead->type; - struct mpage *work; - size_t size; - void *newplace; + /* what we do next depends on whether this is a gen0 or gen1 + object */ + if(page->generation) { + /* this is a generation 1 object. This means we are not going + to move it, we don't have to check to see if it's an atomic + object masquerading as a tagged object, etc. So all we do + is add the pointer to the mark queue and note on the page + that we marked something on it*/ + if((NUM(page->addr) + page->previous_size) <= NUM(p)) { + GCDEBUG((DEBUGOUTF, "Marking %p (leaving alone)\n", p)); + ohead->mark = 1; + page->marked_on = 1; + page->previous_size = PREFIX_SIZE; + page->live_size += ohead->size; + record_backtrace(page, p); + push_ptr(p); + } else GCDEBUG((DEBUGOUTF, "Not marking %p (it's old; %p / %i)\n", + p, page, page->previous_size)); + } else { + /* this is a generation 0 object. This means that we do have + to do all of the above. Fun, fun, fun. */ + unsigned short type = ohead->type; + struct mpage *work; + size_t size; + void *newplace; - /* first check to see if this is an atomic object masquerading - as a tagged object; if it is, then convert it */ - if(type == PAGE_TAGGED) { + /* first check to see if this is an atomic object masquerading + as a tagged object; if it is, then convert it */ + if(type == PAGE_TAGGED) { if((unsigned long)gc->mark_table[*(unsigned short*)p] < PAGE_TYPES) - type = ohead->type = (int)(unsigned long)gc->mark_table[*(unsigned short*)p]; + type = ohead->type = (int)(unsigned long)gc->mark_table[*(unsigned short*)p]; } - /* now set us up for the search for where to put this thing */ - work = GC->gen1_pages[type]; - size = gcWORDS_TO_BYTES(ohead->size); + /* now set us up for the search for where to put this thing */ + work = GC->gen1_pages[type]; + size = gcWORDS_TO_BYTES(ohead->size); - /* search for a page with the space to spare */ - if (work && ((work->size + size) >= APAGE_SIZE)) - work = NULL; + /* search for a page with the space to spare */ + if (work && ((work->size + size) >= APAGE_SIZE)) + work = NULL; - /* now either fetch where we're going to put this object or make - a new page if we couldn't find a page with space to spare */ - if(work) { - pagemap_add(work); - work->marked_on = 1; + /* now either fetch where we're going to put this object or make + a new page if we couldn't find a page with space to spare */ + if(work) { + pagemap_add(work); + work->marked_on = 1; if (work->mprotected) { work->mprotected = 0; protect_pages(work->addr, APAGE_SIZE, 1); } newplace = PTR(NUM(work->addr) + work->size); - } else { - /* Allocate and prep the page */ + } else { + /* Allocate and prep the page */ void *addr; - work = malloc_mpage(); + work = malloc_mpage(); addr = malloc_dirty_pages(APAGE_SIZE, APAGE_SIZE); if (!addr) out_of_memory(); work->addr = addr; - work->generation = 1; - work->page_type = type; - work->size = work->previous_size = PREFIX_SIZE; - 
work->marked_on = 1; - backtrace_new_page(work); - work->next = GC->gen1_pages[type]; - work->prev = NULL; - if(work->next) - work->next->prev = work; - pagemap_add(work); - GC->gen1_pages[type] = work; - newplace = PTR(NUM(work->addr) + PREFIX_SIZE); - } + work->generation = 1; + work->page_type = type; + work->size = work->previous_size = PREFIX_SIZE; + work->marked_on = 1; + backtrace_new_page(work); + work->next = GC->gen1_pages[type]; + work->prev = NULL; + if(work->next) + work->next->prev = work; + pagemap_add(work); + GC->gen1_pages[type] = work; + newplace = PTR(NUM(work->addr) + PREFIX_SIZE); + } + + /* update the size */ + work->size += size; + work->has_new = 1; - /* update the size */ - work->size += size; - work->has_new = 1; - /* transfer the object */ - memcpy(newplace, (const void *)ohead, size); - /* mark the old location as marked and moved, and the new location - as marked */ - ohead->mark = ohead->moved = 1; - ((struct objhead *)newplace)->mark = 1; - /* if we're doing memory accounting, then we need the btc_mark - to be set properly */ - set_btc_mark(newplace); - /* drop the new location of the object into the forwarding space - and into the mark queue */ - newplace = PTR(NUM(newplace) + WORD_SIZE); - /* record why we marked this one (if enabled) */ - record_backtrace(work, newplace); - /* set forwarding pointer */ - GCDEBUG((DEBUGOUTF,"Marking %p (moved to %p on page %p)\n", - p, newplace, work)); + memcpy(newplace, (const void *)ohead, size); + /* mark the old location as marked and moved, and the new location + as marked */ + ohead->mark = ohead->moved = 1; + ((struct objhead *)newplace)->mark = 1; + /* if we're doing memory accounting, then we need the btc_mark + to be set properly */ + set_btc_mark(newplace); + /* drop the new location of the object into the forwarding space + and into the mark queue */ + newplace = PTR(NUM(newplace) + WORD_SIZE); + /* record why we marked this one (if enabled) */ + record_backtrace(work, newplace); + /* set forwarding pointer */ + GCDEBUG((DEBUGOUTF,"Marking %p (moved to %p on page %p)\n", + p, newplace, work)); *(void**)p = newplace; - push_ptr(newplace); - } + push_ptr(newplace); + } } else GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p)); } } else GCDEBUG((DEBUGOUTF,"Not marking %p (no page)\n",p)); @@ -1674,7 +1672,7 @@ void *GC_resolve(void *p) if(!page || page->big_page) return p; - + info = (struct objhead *)(NUM(p) - WORD_SIZE); if(info->mark && info->moved) return *(void**)p; @@ -1713,12 +1711,12 @@ void GC_fixup(void *pp) #ifdef MZ_GC_BACKTRACE # define trace_page_t struct mpage # define trace_page_type(page) (page)->page_type -static void *trace_pointer_start(struct mpage *page, void *p) { - if (page->big_page) - return PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); - else - return p; -} + static void *trace_pointer_start(struct mpage *page, void *p) { + if (page->big_page) + return PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); + else + return p; + } # define TRACE_PAGE_TAGGED PAGE_TAGGED # define TRACE_PAGE_ARRAY PAGE_ARRAY # define TRACE_PAGE_TAGGED_ARRAY PAGE_TARRAY @@ -1738,12 +1736,12 @@ static void *trace_pointer_start(struct mpage *page, void *p) { #define MAX_DUMP_TAG 256 void GC_dump_with_traces(int flags, - GC_get_type_name_proc get_type_name, - GC_get_xtagged_name_proc get_xtagged_name, - GC_for_each_found_proc for_each_found, - short trace_for_tag, - GC_print_tagged_value_proc print_tagged_value, - int path_length_limit) + GC_get_type_name_proc get_type_name, + GC_get_xtagged_name_proc 
get_xtagged_name, + GC_for_each_found_proc for_each_found, + short trace_for_tag, + GC_print_tagged_value_proc print_tagged_value, + int path_length_limit) { struct mpage *page; int i; @@ -1760,20 +1758,20 @@ void GC_dump_with_traces(int flags, for (page = GC->gen1_pages[PAGE_TAGGED]; page; page = page->next) { void **start = PPTR(NUM(page->addr) + PREFIX_SIZE); void **end = PPTR(NUM(page->addr) + page->size); - + while(start < end) { struct objhead *info = (struct objhead *)start; if(!info->dead) { - unsigned short tag = *(unsigned short *)(start + 1); - if (tag < MAX_DUMP_TAG) { - counts[tag]++; - sizes[tag] += info->size; - } - if (tag == trace_for_tag) { - register_traced_object(start + 1); - if (for_each_found) - for_each_found(start + 1); - } + unsigned short tag = *(unsigned short *)(start + 1); + if (tag < MAX_DUMP_TAG) { + counts[tag]++; + sizes[tag] += info->size; + } + if (tag == trace_for_tag) { + register_traced_object(start + 1); + if (for_each_found) + for_each_found(start + 1); + } } start += info->size; } @@ -1783,14 +1781,14 @@ void GC_dump_with_traces(int flags, void **start = PPTR(NUM(page->addr) + PREFIX_SIZE); unsigned short tag = *(unsigned short *)(start + 1); if (tag < MAX_DUMP_TAG) { - counts[tag]++; + counts[tag]++; sizes[tag] += gcBYTES_TO_WORDS(page->size); } if ((tag == trace_for_tag) - || (tag == -trace_for_tag)) { - register_traced_object(start + 1); - if (for_each_found) - for_each_found(start + 1); + || (tag == -trace_for_tag)) { + register_traced_object(start + 1); + if (for_each_found) + for_each_found(start + 1); } } } @@ -1800,12 +1798,12 @@ void GC_dump_with_traces(int flags, if (counts[i]) { char *tn, buf[256]; if (get_type_name) - tn = get_type_name((Type_Tag)i); + tn = get_type_name((Type_Tag)i); else - tn = NULL; + tn = NULL; if (!tn) { - sprintf(buf, "unknown,%d", i); - tn = buf; + sprintf(buf, "unknown,%d", i); + tn = buf; } GCPRINT(GCOUTF, " %20.20s: %10ld %10ld\n", tn, counts[i], gcWORDS_TO_BYTES(sizes[i])); } @@ -1813,24 +1811,24 @@ void GC_dump_with_traces(int flags, GCPRINT(GCOUTF, "End MzScheme3m\n"); GCWARN((GCOUTF, "Generation 0: %li of %li bytes used\n", gen0_size_in_use(), GC->gen0.max_size)); - + for(i = 0; i < PAGE_TYPES; i++) { unsigned long total_use = 0, count = 0; - + for(page = GC->gen1_pages[i]; page; page = page->next) { total_use += page->size; count++; } GCWARN((GCOUTF, "Generation 1 [%s]: %li bytes used in %li pages\n", - type_name[i], total_use, count)); + type_name[i], total_use, count)); } GCWARN((GCOUTF,"\n")); GCWARN((GCOUTF,"Current memory use: %li\n", GC_get_memory_use(NULL))); GCWARN((GCOUTF,"Peak memory use after a collection: %li\n", GC->peak_memory_use)); GCWARN((GCOUTF,"Allocated (+reserved) page sizes: %li (+%li)\n", - GC->used_pages * APAGE_SIZE, - GC->actual_pages_size - (GC->used_pages * APAGE_SIZE))); + GC->used_pages * APAGE_SIZE, + GC->actual_pages_size - (GC->used_pages * APAGE_SIZE))); GCWARN((GCOUTF,"# of major collections: %li\n", GC->num_major_collects)); GCWARN((GCOUTF,"# of minor collections: %li\n", GC->num_minor_collects)); GCWARN((GCOUTF,"# of installed finalizers: %i\n", num_fnls)); @@ -1878,14 +1876,14 @@ static void prepare_pages_for_collection(void) { struct mpage *work; int i; - + GCDEBUG((DEBUGOUTF, "PREPPING PAGES.\n")); if(GC->gc_full) { /* we need to make sure that previous_size for every page is reset, so we don't accidentally screw up the mark routine */ if (GC->generations_available) { for(i = 0; i < PAGE_TYPES; i++) - for(work = GC->gen1_pages[i]; work; work = work->next) { + 
for(work = GC->gen1_pages[i]; work; work = work->next) { if (work->mprotected) { work->mprotected = 0; add_protect_page_range(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, APAGE_SIZE, 1); @@ -1895,15 +1893,15 @@ static void prepare_pages_for_collection(void) } for(i = 0; i < PAGE_TYPES; i++) for(work = GC->gen1_pages[i]; work; work = work->next) { - work->live_size = 0; - work->previous_size = PREFIX_SIZE; + work->live_size = 0; + work->previous_size = PREFIX_SIZE; } } else { /* if we're not doing a major collection, then we need to remove all the pages in GC->gen1_pages[] from the page map */ for(i = 0; i < PAGE_TYPES; i++) for(work = GC->gen1_pages[i]; work; work = work->next) { - if (GC->generations_available) { + if (GC->generations_available) { if (work->back_pointers) { if (work->mprotected) { work->mprotected = 0; @@ -1911,7 +1909,7 @@ static void prepare_pages_for_collection(void) } } } - pagemap_remove(work); + pagemap_remove(work); } flush_protect_page_ranges(1); } @@ -1927,40 +1925,40 @@ static void mark_backpointers(void) which point backwards into generation 0, since they're roots. */ for(i = 0; i < PAGE_TYPES; i++) { for(work = GC->gen1_pages[i]; work; work = work->next) { - if(work->back_pointers) { - /* these pages are guaranteed not to be write protected, because - if they were, they wouldn't have this bit set */ - work->marked_on = 1; - work->previous_size = PREFIX_SIZE; - pagemap_add(work); - if(work->big_page) { - work->big_page = 2; - push_ptr(PPTR(NUM(work->addr) + PREFIX_SIZE)); - } else { - if(work->page_type != PAGE_ATOMIC) { - void **start = PPTR(NUM(work->addr) + PREFIX_SIZE); - void **end = PPTR(NUM(work->addr) + work->size); - - while(start < end) { - struct objhead *info = (struct objhead *)start; - if(!info->dead) { - info->mark = 1; - /* This must be a push_ptr, and not a direct call to - internal_mark. This is because we need every object - in the older heap to be marked out of and noted as - marked before we do anything else */ - push_ptr(start + 1); - } - start += info->size; - } - } - } - work->previous_size = PREFIX_SIZE; - } else { - GCDEBUG((DEBUGOUTF,"Setting previous_size on %p to %i\n", work, - work->size)); - work->previous_size = work->size; - } + if(work->back_pointers) { + /* these pages are guaranteed not to be write protected, because + if they were, they wouldn't have this bit set */ + work->marked_on = 1; + work->previous_size = PREFIX_SIZE; + pagemap_add(work); + if(work->big_page) { + work->big_page = 2; + push_ptr(PPTR(NUM(work->addr) + PREFIX_SIZE)); + } else { + if(work->page_type != PAGE_ATOMIC) { + void **start = PPTR(NUM(work->addr) + PREFIX_SIZE); + void **end = PPTR(NUM(work->addr) + work->size); + + while(start < end) { + struct objhead *info = (struct objhead *)start; + if(!info->dead) { + info->mark = 1; + /* This must be a push_ptr, and not a direct call to + internal_mark. 
This is because we need every object + in the older heap to be marked out of and noted as + marked before we do anything else */ + push_ptr(start + 1); + } + start += info->size; + } + } + } + work->previous_size = PREFIX_SIZE; + } else { + GCDEBUG((DEBUGOUTF,"Setting previous_size on %p to %i\n", work, + work->size)); + work->previous_size = work->size; + } } } } @@ -1988,7 +1986,7 @@ struct mpage *allocate_compact_target(struct mpage *work) work->next = npage; if (npage->next) npage->next->prev = npage; - + return npage; } @@ -2005,84 +2003,84 @@ inline static void do_heap_compact(void) /* Start from the end: */ if (work) { while (work->next) - work = work->next; + work = work->next; } npage = work; while(work) { if(work->marked_on && !work->has_new) { - /* then determine if we actually want to do compaction */ - if(should_compact_page(gcWORDS_TO_BYTES(work->live_size),work->size)) { - void **start = PPTR(NUM(work->addr) + PREFIX_SIZE); - void **end = PPTR(NUM(work->addr) + work->size); - void **newplace; - unsigned long avail; + /* then determine if we actually want to do compaction */ + if(should_compact_page(gcWORDS_TO_BYTES(work->live_size),work->size)) { + void **start = PPTR(NUM(work->addr) + PREFIX_SIZE); + void **end = PPTR(NUM(work->addr) + work->size); + void **newplace; + unsigned long avail; - GCDEBUG((DEBUGOUTF, "Compacting page %p: new version at %p\n", - work, npage)); + GCDEBUG((DEBUGOUTF, "Compacting page %p: new version at %p\n", + work, npage)); - if (npage == work) { - /* Need to insert a page: */ - npage = allocate_compact_target(work); - } - avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size); - newplace = PPTR(NUM(npage->addr) + npage->size); + if (npage == work) { + /* Need to insert a page: */ + npage = allocate_compact_target(work); + } + avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size); + newplace = PPTR(NUM(npage->addr) + npage->size); - while(start < end) { - struct objhead *info; + while(start < end) { + struct objhead *info; - info = (struct objhead *)start; + info = (struct objhead *)start; - if(info->mark) { - while (avail <= info->size) { - npage->size = NUM(newplace) - NUM(npage->addr); - do { - npage = npage->prev; - } while (!npage->marked_on || npage->has_new); - if (npage == work) - npage = allocate_compact_target(work); - avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size); - newplace = PPTR(NUM(npage->addr) + npage->size); - } + if(info->mark) { + while (avail <= info->size) { + npage->size = NUM(newplace) - NUM(npage->addr); + do { + npage = npage->prev; + } while (!npage->marked_on || npage->has_new); + if (npage == work) + npage = allocate_compact_target(work); + avail = gcBYTES_TO_WORDS(APAGE_SIZE - npage->size); + newplace = PPTR(NUM(npage->addr) + npage->size); + } if (npage->mprotected) { npage->mprotected = 0; protect_pages(npage->addr, APAGE_SIZE, 1); } - GCDEBUG((DEBUGOUTF,"Moving size %i object from %p to %p\n", - gcWORDS_TO_BYTES(info->size), start+1, newplace+1)); - memcpy(newplace, start, gcWORDS_TO_BYTES(info->size)); - info->moved = 1; - *(PPTR(NUM(start) + WORD_SIZE)) = PTR(NUM(newplace) + WORD_SIZE); - copy_backtrace_source(npage, newplace, work, start); - newplace += info->size; - avail -= info->size; - } - start += info->size; - } - npage->size = NUM(newplace) - NUM(npage->addr); + GCDEBUG((DEBUGOUTF,"Moving size %i object from %p to %p\n", + gcWORDS_TO_BYTES(info->size), start+1, newplace+1)); + memcpy(newplace, start, gcWORDS_TO_BYTES(info->size)); + info->moved = 1; + *(PPTR(NUM(start) + WORD_SIZE)) = PTR(NUM(newplace) + 
WORD_SIZE); + copy_backtrace_source(npage, newplace, work, start); + newplace += info->size; + avail -= info->size; + } + start += info->size; + } + npage->size = NUM(newplace) - NUM(npage->addr); - prev = work->prev; + prev = work->prev; - if(prev) prev->next = work->next; else GC->gen1_pages[i] = work->next; - if(work->next) work->next->prev = prev; + if(prev) prev->next = work->next; else GC->gen1_pages[i] = work->next; + if(work->next) work->next->prev = prev; - /* push work onto GC->release_pages */ - work->next = GC->release_pages; - GC->release_pages = work; + /* push work onto GC->release_pages */ + work->next = GC->release_pages; + GC->release_pages = work; - /* add the old page to the page map so fixups can find forwards */ - pagemap_add(work); + /* add the old page to the page map so fixups can find forwards */ + pagemap_add(work); - work = prev; - } else { - work = work->prev; - } + work = prev; + } else { + work = work->prev; + } } else { if (npage == work) npage = npage->prev; - work = work->prev; + work = work->prev; } } } @@ -2097,120 +2095,125 @@ static void repair_heap(void) for(i = 0; i < PAGE_TYPES; i++) { for(page = GC->gen1_pages[i]; page; page = page->next) { if(page->marked_on) { - page->has_new = 0; - /* these are guaranteed not to be protected */ - if(page->big_page) { - void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); - void **end = PPTR(NUM(page->addr) + page->size); + page->has_new = 0; + /* these are guaranteed not to be protected */ + if(page->big_page) { + void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); + void **end = PPTR(NUM(page->addr) + page->size); - GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n", - page, start)); - page->big_page = 1; /* remove the mark */ - switch(page->page_type) { - case PAGE_TAGGED: - gc->fixup_table[*(unsigned short*)start](start); - break; - case PAGE_ATOMIC: break; - case PAGE_ARRAY: - while(start < end) gcFIXUP(*(start++)); - break; - case PAGE_XTAGGED: - GC_fixup_xtagged(start); - break; - case PAGE_TARRAY: { - unsigned short tag = *(unsigned short *)start; - end -= INSET_WORDS; - while(start < end) start += gc->fixup_table[tag](start); - break; - } - } - } else { - void **start = PPTR(NUM(page->addr) + page->previous_size); - void **end = PPTR(NUM(page->addr) + page->size); + GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n", + page, start)); + page->big_page = 1; /* remove the mark */ + switch(page->page_type) { + case PAGE_TAGGED: + gc->fixup_table[*(unsigned short*)start](start); + break; + case PAGE_ATOMIC: break; + case PAGE_ARRAY: + while(start < end) gcFIXUP(*(start++)); + break; + case PAGE_XTAGGED: + GC_fixup_xtagged(start); + break; + case PAGE_TARRAY: { + unsigned short tag = *(unsigned short *)start; + end -= INSET_WORDS; + while(start < end) start += gc->fixup_table[tag](start); + break; + } + } + } else { + void **start = PPTR(NUM(page->addr) + page->previous_size); + void **end = PPTR(NUM(page->addr) + page->size); - GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n", - page, start)); - switch(page->page_type) { - case PAGE_TAGGED: - while(start < end) { - struct objhead *info = (struct objhead *)start; - - if(info->mark) { - info->mark = 0; - gc->fixup_table[*(unsigned short*)(start+1)](start+1); - } else { - info->dead = 1; - } - start += info->size; - } - break; - case PAGE_ATOMIC: - while(start < end) { - struct objhead *info = (struct objhead *)start; - if(info->mark) { - info->mark = 0; - } else info->dead = 1; - start += 
info->size; - } - break; - case PAGE_ARRAY: - while(start < end) { - struct objhead *info = (struct objhead *)start; - size_t size = info->size; - if(info->mark) { - void **tempend = (start++) + size; - while(start < tempend) gcFIXUP(*start++); - info->mark = 0; - } else { - info->dead = 1; - start += size; - } - } - break; - case PAGE_TARRAY: - while(start < end) { - struct objhead *info = (struct objhead *)start; - size_t size = info->size; - if(info->mark) { - void **tempend = (start++) + (size - INSET_WORDS); - unsigned short tag = *(unsigned short*)start; - while(start < tempend) - start += gc->fixup_table[tag](start); - info->mark = 0; - start = PPTR(info) + size; - } else { - info->dead = 1; - start += size; - } - } - break; - case PAGE_XTAGGED: - while(start < end) { - struct objhead *info = (struct objhead *)start; - if(info->mark) { - GC_fixup_xtagged(start + 1); - info->mark = 0; - } else info->dead = 1; - start += info->size; - } - } - } + GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n", + page, start)); + switch(page->page_type) { + case PAGE_TAGGED: + while(start < end) { + struct objhead *info = (struct objhead *)start; + + if(info->mark) { + info->mark = 0; + gc->fixup_table[*(unsigned short*)(start+1)](start+1); + } else { + info->dead = 1; + } + start += info->size; + } + break; + case PAGE_ATOMIC: + while(start < end) { + struct objhead *info = (struct objhead *)start; + if(info->mark) { + info->mark = 0; + } else info->dead = 1; + start += info->size; + } + break; + case PAGE_ARRAY: + while(start < end) { + struct objhead *info = (struct objhead *)start; + size_t size = info->size; + if(info->mark) { + void **tempend = (start++) + size; + while(start < tempend) gcFIXUP(*start++); + info->mark = 0; + } else { + info->dead = 1; + start += size; + } + } + break; + case PAGE_TARRAY: + while(start < end) { + struct objhead *info = (struct objhead *)start; + size_t size = info->size; + if(info->mark) { + void **tempend = (start++) + (size - INSET_WORDS); + unsigned short tag = *(unsigned short*)start; + while(start < tempend) + start += gc->fixup_table[tag](start); + info->mark = 0; + start = PPTR(info) + size; + } else { + info->dead = 1; + start += size; + } + } + break; + case PAGE_XTAGGED: + while(start < end) { + struct objhead *info = (struct objhead *)start; + if(info->mark) { + GC_fixup_xtagged(start + 1); + info->mark = 0; + } else info->dead = 1; + start += info->size; + } + } + } } else GCDEBUG((DEBUGOUTF,"Not Cleaning page %p\n", page)); } } } +static inline void gen1_free_mpage(mpage *page) { + size_t real_page_size = page->big_page ? 
round_to_apage_size(page->size) : APAGE_SIZE; + pagemap_remove(page); + free_backtrace(page); + free_pages(page->addr, real_page_size); + free_mpage(page); +} + static inline void cleanup_vacated_pages(NewGC *gc) { mpage *pages = gc->release_pages; /* Free pages vacated by compaction: */ while (pages) { - mpage *prev = pages->next; - pagemap_remove(pages); - free_backtrace(pages); - free_pages(pages->addr, APAGE_SIZE); - free_mpage(pages); - pages = prev; + mpage *next = pages->next; + gen1_free_mpage(pages); + pages = next; } gc->release_pages = NULL; } @@ -2236,7 +2239,7 @@ static void clean_up_heap(void) NewGC *gc = GC; gc->memory_in_use = 0; - + gen0_free_big_pages(); for(i = 0; i < PAGE_TYPES; i++) { @@ -2245,30 +2248,27 @@ static void clean_up_heap(void) if(gc->gc_full) { work = GC->gen1_pages[i]; while(work) { - if(!work->marked_on) { - struct mpage *next = work->next; - - if(prev) prev->next = next; else GC->gen1_pages[i] = next; - if(next) work->next->prev = prev; - pagemap_remove(work); - free_backtrace(work); - free_pages(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE); - free_mpage(work); - work = next; - } else { - pagemap_add(work); - work->back_pointers = work->marked_on = 0; - prev = work; - work = work->next; - } + if(!work->marked_on) { + struct mpage *next = work->next; + + if(prev) prev->next = next; else GC->gen1_pages[i] = next; + if(next) work->next->prev = prev; + gen1_free_mpage(work); + work = next; + } else { + pagemap_add(work); + work->back_pointers = work->marked_on = 0; + prev = work; + work = work->next; + } } } else { for(work = GC->gen1_pages[i]; work; work = work->next) { - pagemap_add(work); - work->back_pointers = work->marked_on = 0; + pagemap_add(work); + work->back_pointers = work->marked_on = 0; } } - + /* since we're here anyways, compute the total memory use */ for(work = GC->gen1_pages[i]; work; work = work->next) gc->memory_in_use += work->size; @@ -2285,7 +2285,7 @@ static void protect_old_pages(void) for(i = 0; i < PAGE_TYPES; i++) if(i != PAGE_ATOMIC) for(page = GC->gen1_pages[i]; page; page = page->next) - if(page->page_type != PAGE_ATOMIC) { + if(page->page_type != PAGE_ATOMIC) { if (!page->mprotected) { page->mprotected = 1; add_protect_page_range(page->addr, page->size, APAGE_SIZE, 0); @@ -2300,7 +2300,7 @@ extern double scheme_get_inexact_milliseconds(void); # define TIME_DECLS() double start, task_start # define TIME_INIT() start = task_start = scheme_get_inexact_milliseconds(); fprintf(stderr, "GC (%d):\n", GC->gc_full) # define TIME_STEP(task) fprintf(stderr, " %s: %lf\n", task, scheme_get_inexact_milliseconds() - task_start); \ - task_start = scheme_get_inexact_milliseconds() + task_start = scheme_get_inexact_milliseconds() # define TIME_DONE() fprintf(stderr, " Total: %lf\n", scheme_get_inexact_milliseconds() - start) #else # define TIME_DECLS() /**/ @@ -2331,13 +2331,13 @@ static void garbage_collect(int force_full) || (since_last_full > 100) || (gc->memory_in_use > (2 * last_full_mem_use)); #if 0 printf("Collection %li (full = %i): %i / %i / %i / %i %ld\n", number, - gc->gc_full, force_full, !generations_available, - (since_last_full > 100), (gc->memory_in_use > (2 * last_full_mem_use)), - last_full_mem_use); + gc->gc_full, force_full, !generations_available, + (since_last_full > 100), (gc->memory_in_use > (2 * last_full_mem_use)), + last_full_mem_use); #endif next_gc_full = gc->gc_full; - + if (gc->full_needed_for_finalization) { gc->full_needed_for_finalization= 0; gc->gc_full = 1; @@ -2404,7 +2404,7 
@@ static void garbage_collect(int force_full) removed */ clear_stack_pages(); #endif - + TIME_STEP("marked"); zero_weak_boxes(); @@ -2548,12 +2548,12 @@ static void dump_stack_pos(void *a) # undef X_source void GC_dump_variable_stack(void **var_stack, - long delta, - void *limit, - void *stack_mem, - GC_get_type_name_proc get_type_name, - GC_get_xtagged_name_proc get_xtagged_name, - GC_print_tagged_value_proc print_tagged_value) + long delta, + void *limit, + void *stack_mem, + GC_get_type_name_proc get_type_name, + GC_get_xtagged_name_proc get_xtagged_name, + GC_print_tagged_value_proc print_tagged_value) { stack_get_type_name = get_type_name; stack_get_xtagged_name = get_xtagged_name; @@ -2582,14 +2582,8 @@ void GC_free_all(void) next = work->next; if (work->mprotected) - protect_pages(work->addr, - work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, - 1); - - pagemap_remove(work); - free_backtrace(work); - free_pages(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE); - free_mpage(work); + protect_pages(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, 1); + gen1_free_mpage(work); } }
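
Most of the churn in this patch is re-indentation. The substantive changes are (a) the new gen1_free_mpage() helper, which folds the repeated pagemap_remove / free_backtrace / free_pages / free_mpage teardown into one place and is now called from cleanup_vacated_pages(), clean_up_heap(), and GC_free_all(); (b) the matching switch to a gen0_free_mpage() call in resize_gen0(); and (c) minor reordering inside GC_mark()'s copying path. For readers following the GC_mark / GC_resolve hunks, below is a minimal, self-contained sketch of the forwarding-pointer protocol they implement. The objhead layout, mark_and_move(), and resolve() here are simplified stand-ins (plain bitfields, byte sizes, and a malloc'd "to-space" buffer rather than the collector's word-packed headers and gen1 pages); they illustrate the shape of the code, not the actual newgc.c definitions.

/* A minimal model of the mark/forward protocol used by GC_mark and
 * GC_resolve in the patch above.  The real collector packs mark,
 * moved, size and type bits into a one-word objhead sitting just
 * before each object and copies live gen0 objects onto gen1 pages;
 * here plain bitfields, byte sizes and a malloc'd buffer stand in
 * for that machinery, so every name below is illustrative only. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct objhead {
  unsigned mark  : 1;   /* marked during the current collection         */
  unsigned moved : 1;   /* payload evacuated; old payload now holds the
                           forwarding pointer                           */
  unsigned size  : 30;  /* payload size in bytes (words in the real GC) */
} objhead;

/* Evacuate one object: copy header+payload to newplace, flag the old
   copy as marked+moved, flag the new copy as marked, and plant a
   forwarding pointer in the first word of the old payload.  This is
   the shape of the gen0 branch of GC_mark. */
static void *mark_and_move(objhead *old, void *newplace)
{
  size_t total = sizeof(objhead) + old->size;
  void *newpayload = (char *)newplace + sizeof(objhead);

  memcpy(newplace, old, total);         /* transfer the object        */
  old->mark = old->moved = 1;           /* old copy: marked and moved */
  ((objhead *)newplace)->mark = 1;      /* new copy: marked           */

  /* The real code stores this as *(void**)p = newplace; memcpy just
     avoids any alignment assumptions in this toy. */
  memcpy(old + 1, &newpayload, sizeof newpayload);
  return newpayload;
}

/* Follow the forwarding pointer if one was installed, otherwise the
   object never moved -- the same mark && moved test as GC_resolve. */
static void *resolve(void *payload)
{
  objhead *info = (objhead *)payload - 1;
  if (info->mark && info->moved) {
    void *fwd;
    memcpy(&fwd, payload, sizeof fwd);
    return fwd;
  }
  return payload;
}

int main(void)
{
  /* one "gen0" object with an 8-byte payload */
  objhead *obj = calloc(1, sizeof(objhead) + 8);
  obj->size = 8;
  strcpy((char *)(obj + 1), "payload");

  /* a stand-in for a gen1 page with room to spare */
  void *tospace = malloc(sizeof(objhead) + 8);

  void *newpayload = mark_and_move(obj, tospace);
  printf("old %p resolves to %p (copy says \"%s\")\n",
         (void *)(obj + 1), resolve(obj + 1), (char *)newpayload);

  free(obj);
  free(tospace);
  return 0;
}

The real collector additionally records where the copy landed (record_backtrace, set_btc_mark), bumps the destination page's size and has_new flag, and pushes the new address onto the mark queue with push_ptr so its pointers get propagated later; the sketch drops those steps to keep the forward-and-resolve control flow visible.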