GC: adjust backpointer marking to not touch other page records

Also, free medium pages for a terminated place.
Matthew Flatt 2015-09-07 20:42:54 -06:00
parent 9372862ee2
commit 37c4ea4720
2 changed files with 145 additions and 103 deletions
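For orientation before the diff: a minor collection previously scanned every generation-1 page record (and every medium-page record) just to find the ones with back pointers. The change below instead threads each old page onto a list rooted at gc->backpointer_next the moment it acquires a back pointer, and mark_backpointers walks only that list, leaving all other page records untouched. A minimal, self-contained C sketch of the mechanism (type, field, and function names follow the diff; the structs are trimmed to the fields used here and the per-page marking work is elided):

    #include <stdint.h>

    /* Sketch only: the real mpage and NewGC have many more fields. */
    typedef struct mpage {
      struct mpage *backpointer_next; /* chains pages that contain back pointers */
      unsigned char back_pointers;    /* set once the page is on the chain */
    } mpage;

    typedef struct NewGC {
      mpage *backpointer_next;        /* head of the chain, rebuilt every cycle */
      intptr_t num_gen1_pages;        /* count of all old-generation pages */
      intptr_t minor_old_traversed;
      intptr_t minor_old_skipped;
    } NewGC;

    /* Thread a page onto the chain; callers check page->back_pointers first,
       so a page is added at most once per cycle. */
    static void set_has_back_pointers(NewGC *gc, mpage *page)
    {
      page->back_pointers = 1;
      page->backpointer_next = gc->backpointer_next;
      gc->backpointer_next = page;
    }

    /* Minor collection: visit only the chained pages, then reset the chain. */
    static void mark_backpointers_sketch(NewGC *gc)
    {
      mpage *work;
      intptr_t traversed = 0;

      for (work = gc->backpointer_next; work; work = work->backpointer_next) {
        /* the real mark_backpointers unprotects the page if needed and
           pushes its live objects onto the mark stack here */
        traversed++;
      }

      gc->minor_old_traversed += traversed;
      gc->minor_old_skipped += (gc->num_gen1_pages - traversed);
      gc->backpointer_next = NULL; /* rebuilt by later write-barrier hits */
    }

With num_gen1_pages kept up to date at every gen1 page allocation and free (the ++/-- sites in the diff), the skipped-page statistic falls out of the subtraction instead of requiring a scan.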


@@ -1086,7 +1086,7 @@ static void *allocate_big(const size_t request_size_bytes, int type)
   bpage->addr = addr;
   bpage->size = allocate_size;
-  bpage->size_class = 2;
+  bpage->size_class = SIZE_CLASS_BIG_PAGE;
   bpage->page_type = type;
   bpage->mmu_src_block = src_block;
   GCVERBOSEPAGE(gc, "NEW BIG PAGE", bpage);
@@ -1095,6 +1095,7 @@ static void *allocate_big(const size_t request_size_bytes, int type)
   bpage->next = gc->gen0.big_pages;
   if(bpage->next) bpage->next->prev = bpage;
   gc->gen0.big_pages = bpage;
+  gc->num_gen1_pages++;
   if (gc->saved_allocator) {
     /* MESSAGE ALLOCATION: orphan this page from the current GC; this
@@ -1129,7 +1130,7 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p
   page->addr = addr;
   page->mmu_src_block = src_block;
   page->size = sz;
-  page->size_class = 1;
+  page->size_class = SIZE_CLASS_MED_PAGE;
   page->page_type = PAGE_BIG;
   MED_NEXT_SEARCH_SLOT(page) = PREFIX_SIZE;
   page->live_size = sz;
@@ -1148,6 +1149,8 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p
   gc->med_pages[ty][pos] = page;
   gc->med_freelist_pages[ty][pos] = page;
+  gc->num_gen1_pages++;
   if (gc->saved_allocator) { /* see MESSAGE ALLOCATION above */
     orphan_page_accounting(gc, APAGE_SIZE);
 #ifdef POINTER_OWNERSHIP_CHECK
@@ -1266,7 +1269,7 @@ inline static mpage *gen0_create_new_nursery_mpage(NewGC *gc, const size_t page_
   page = malloc_mpage();
   page->addr = addr;
   page->mmu_src_block = src_block;
-  page->size_class = 0;
+  page->size_class = SIZE_CLASS_SMALL_PAGE;
   page->size = PREFIX_SIZE;
   GEN0_ALLOC_SIZE(page) = page_size;
@@ -2776,6 +2779,13 @@ inline static int page_mmu_protectable(mpage *page) {
   return (page->page_type == PAGE_ATOMIC) ? MMU_NON_PROTECTABLE : MMU_PROTECTABLE;
 }
+static void set_has_back_pointers(NewGC *gc, mpage *page)
+{
+  page->back_pointers = 1;
+  page->backpointer_next = gc->backpointer_next;
+  gc->backpointer_next = page;
+}
 static int designate_modified_gc(NewGC *gc, void *p)
 {
   mpage *page = pagemap_find_page(gc->page_maps, p);
@@ -2789,7 +2799,8 @@ static int designate_modified_gc(NewGC *gc, void *p)
     page->mprotected = 0;
     mmu_write_unprotect_page(gc->mmu, page->addr, real_page_size(page));
     GC_MP_CNT_INC(mp_write_barrier_cnt);
-    page->back_pointers = 1;
+    if (!page->back_pointers)
+      set_has_back_pointers(gc, page);
     gc->modified_unprotects++;
     return 1;
   } else {
@@ -3477,7 +3488,7 @@ void GC_mark2(const void *const_p, struct NewGC *gc)
       return;
     }
     /* in this case, it has not. So we want to mark it, first off. */
-    page->size_class = 3;
+    page->size_class = SIZE_CLASS_BIG_PAGE_MARKED;
     /* if this is in the nursery, we want to move it out of the nursery */
     if((page->generation == AGE_GEN_0) && !is_a_master_page)
@@ -3599,6 +3610,7 @@ void GC_mark2(const void *const_p, struct NewGC *gc)
       work->next->prev = work;
       pagemap_add(gc->page_maps, work);
       gc->gen1_pages[type] = work;
+      gc->num_gen1_pages++;
       newplace = PAGE_TO_OBJHEAD(work);
       GCVERBOSEPAGE(gc, "NEW SMALL GEN1 PAGE", work);
     }
@@ -4165,31 +4177,34 @@ static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc)
 static void mark_backpointers(NewGC *gc)
 {
-  if (!gc->gc_full) {
-    mpage *work;
-    int i, ty, traversed = 0, skipped = 0;
   /* if this is not a full collection, then we need to mark any pointers
      that point backwards into generation 0, since they're roots. */
-    for (i = 0; i < PAGE_TYPES; i++) {
-      for (work = gc->gen1_pages[i]; work; work = work->next) {
-        if (work->back_pointers) {
+  if (!gc->gc_full) {
+    mpage *work;
+    int traversed = 0;
+    for (work = gc->backpointer_next; work; work = work->backpointer_next) {
+      GC_ASSERT(work->back_pointers);
       if (work->mprotected) {
         /* expected only if QUEUED_MPROTECT_IS_PROMISCUOUS && AGE_GEN_0_TO_GEN_HALF(gc) */
         work->mprotected = 0;
         mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work));
       }
       work->marked_from = 1;
-      if (work->size_class) {
+      if (work->size_class == SIZE_CLASS_BIG_PAGE) {
         /* must be a big page */
-        work->size_class = 3;
+        work->size_class = SIZE_CLASS_BIG_PAGE_MARKED;
         push_ptr(gc, TAG_AS_BIG_PAGE_PTR(BIG_PAGE_TO_OBJECT(work)));
-      } else {
-        if (work->page_type != PAGE_ATOMIC) {
+      } else if (work->size_class == SIZE_CLASS_SMALL_PAGE) {
+        /* small page */
        void **start = PAGE_START_VSS(work);
        void **end = PAGE_END_VSS(work);
-          while(start < end) {
+        GC_ASSERT(work->page_type != PAGE_ATOMIC);
+        while (start < end) {
          objhead *info = (objhead *)start;
          if (!info->dead) {
            info->mark = 1;
@@ -4201,32 +4216,14 @@ static void mark_backpointers(NewGC *gc)
          }
          start += info->size;
        }
-          }
-        }
        work->previous_size = PREFIX_SIZE;
-        traversed++;
      } else {
-        skipped++;
-      }
-    }
-  }
-    for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
-      for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-        for (work = gc->med_pages[ty][i]; work; work = work->next) {
-          if (work->back_pointers) {
+        /* medium page */
        void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
        void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
-            if (work->mprotected) {
-              /* expected only if QUEUED_MPROTECT_IS_PROMISCUOUS && AGE_GEN_0_TO_GEN_HALF(gc) */
-              work->mprotected = 0;
-              mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work));
-            }
-            work->marked_from = 1;
-            if (ty == MED_PAGE_NONATOMIC) {
+        GC_ASSERT(work->size_class == SIZE_CLASS_MED_PAGE);
        while (start <= end) {
          objhead *info = (objhead *)start;
          if (!info->dead) {
@@ -4237,17 +4234,15 @@ static void mark_backpointers(NewGC *gc)
          start += info->size;
        }
      }
      traversed++;
-        } else {
-          skipped++;
-        }
-      }
-    }
    }
    gc->minor_old_traversed += traversed;
-    gc->minor_old_skipped += skipped;
+    gc->minor_old_skipped += (gc->num_gen1_pages - traversed);
  }
+  gc->backpointer_next = NULL;
 }
 mpage *allocate_compact_target(NewGC *gc, mpage *work)
@@ -4258,11 +4253,12 @@ mpage *allocate_compact_target(NewGC *gc, mpage *work)
                              &npage->mmu_src_block, 1);
   npage->previous_size = npage->size = PREFIX_SIZE;
   npage->generation = AGE_GEN_1;
-  npage->size_class = 0;
+  npage->size_class = SIZE_CLASS_SMALL_PAGE;
   npage->page_type = work->page_type;
   npage->marked_on = 1;
   backtrace_new_page(gc, npage);
   pagemap_add(gc->page_maps, npage);
+  gc->num_gen1_pages++;
   GCVERBOSEPAGE(gc, "NEW COMPACT PAGE", npage);
   /* Link in this new replacement page */
   npage->prev = work;
@@ -4434,7 +4430,7 @@ static void killing_debug(NewGC *gc, mpage *page, objhead *info) {
 }
 #endif
-static void repair_mixed_page(NewGC *gc, mpage *page, void **end)
+static void repair_mixed_page(NewGC *gc, mpage *page, void **end, int track_back_pointers)
 {
   void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
   Fixup2_Proc *fixup_table = gc->fixup_table;
@@ -4501,7 +4497,12 @@ static void repair_mixed_page(NewGC *gc, mpage *page, void **end)
     }
   }
-  page->back_pointers = gc->back_pointers;
+  if (track_back_pointers) {
+    if (gc->back_pointers)
+      set_has_back_pointers(gc, page);
+    else
+      page->back_pointers = 0;
+  }
 }
 static void repair_heap(NewGC *gc)
@@ -4523,7 +4524,7 @@ static void repair_heap(NewGC *gc)
       GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
               page, start));
-      page->size_class = 2; /* remove the mark */
+      page->size_class = SIZE_CLASS_BIG_PAGE; /* remove the mark */
       switch(page->page_type) {
       case PAGE_TAGGED:
         fixup_table[*(unsigned short*)start](start, gc);
@@ -4649,7 +4650,10 @@ static void repair_heap(NewGC *gc)
        }
      }
-      page->back_pointers = gc->back_pointers;
+      if (gc->back_pointers)
+        set_has_back_pointers(gc, page);
+      else
+        page->back_pointers = 0;
     } else GCDEBUG((DEBUGOUTF,"Not Cleaning page %p\n", page));
   }
 }
@@ -4658,14 +4662,14 @@ static void repair_heap(NewGC *gc)
     for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
       for (page = gc->med_pages[ty][i]; page; page = page->next) {
         if (page->marked_on || page->marked_from)
-          repair_mixed_page(gc, page, PPTR(NUM(page->addr) + APAGE_SIZE - page->size));
+          repair_mixed_page(gc, page, PPTR(NUM(page->addr) + APAGE_SIZE - page->size), 1);
       }
     }
   }
   for (page = gc->gen_half.pages; page; page = page->next) {
     GC_ASSERT(page->generation == AGE_GEN_HALF);
-    repair_mixed_page(gc, page, PPTR(NUM(page->addr) + page->size - 1));
+    repair_mixed_page(gc, page, PPTR(NUM(page->addr) + page->size - 1), 0);
   }
 }
@@ -4685,6 +4689,7 @@ static inline void cleanup_vacated_pages(NewGC *gc) {
     mpage *next = pages->next;
     GCVERBOSEPAGE(gc, "Cleaning up vacated", pages);
     gen1_free_mpage(pagemap, pages);
+    --gc->num_gen1_pages;
     pages = next;
   }
   gc->release_pages = NULL;
@@ -4752,6 +4757,7 @@ static void clean_up_heap(NewGC *gc)
         if(next) work->next->prev = prev;
         GCVERBOSEPAGE(gc, "Cleaning up BIGPAGE", work);
         gen1_free_mpage(pagemap, work);
+        --gc->num_gen1_pages;
       } else {
         GCVERBOSEPAGE(gc, "clean_up_heap BIG PAGE ALIVE", work);
         work->marked_on = 0;
@@ -4810,6 +4816,7 @@ static void clean_up_heap(NewGC *gc)
           if(next) work->next->prev = prev;
           GCVERBOSEPAGE(gc, "Cleaning up MED NO MARKEDON", work);
           gen1_free_mpage(pagemap, work);
+          --gc->num_gen1_pages;
         } else {
           /* not marked during minor gc */
           memory_in_use += work->live_size;
@@ -5372,7 +5379,7 @@ void GC_dump_variable_stack(void **var_stack,
 static void free_child_gc(void)
 {
   NewGC *gc = GC_get_GC();
-  int i;
+  int i, ty;
   mpage *work;
   mpage *next;
   PageMap pagemap = gc->page_maps;
@@ -5384,10 +5391,9 @@ static void free_child_gc(void)
   /* First, unprotect all pages. It's important to "queue" up all this work
      as a batch to minimize commuincation with the OS and avoid fragmenting
      the OS's table (in the case of Linux) that tracks page permissions. */
-  for(i = 0; i < PAGE_TYPES; i++) {
-    if(i != PAGE_ATOMIC) {
-      for (work = gc->gen1_pages[i]; work; work = next) {
-        next = work->next;
+  for (i = 0; i < PAGE_TYPES; i++) {
+    if (i != PAGE_ATOMIC) {
+      for (work = gc->gen1_pages[i]; work; work = work->next) {
         if (work->mprotected) {
           work->mprotected = 0;
           mmu_queue_write_unprotect_range(gc->mmu, work->addr, real_page_size(work), page_mmu_type(work), &work->mmu_src_block);
@@ -5395,13 +5401,31 @@ static void free_child_gc(void)
        }
      }
    }
+  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
+    for (work = gc->med_pages[MED_PAGE_NONATOMIC][i]; work; work = work->next) {
+      if (work->mprotected)
+        mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work));
+    }
+  }
   mmu_flush_write_unprotect_ranges(gc->mmu);
-  for(i = 0; i < PAGE_TYPES; i++) {
+  for (i = 0; i < PAGE_TYPES; i++) {
     for (work = gc->gen1_pages[i]; work; work = next) {
       next = work->next;
       GCVERBOSEPAGE(gc, "Cleaning up GC DYING", work);
       gen1_free_mpage(pagemap, work);
+      --gc->num_gen1_pages;
+    }
+  }
+  for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
+    for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
+      for (work = gc->med_pages[ty][i]; work; work = next) {
+        next = work->next;
+        GCVERBOSEPAGE(gc, "Cleaning up GC DYING", work);
+        gen1_free_mpage(pagemap, work);
+        --gc->num_gen1_pages;
+      }
     }
   }
@@ -5417,7 +5441,7 @@ static void free_child_gc(void)
 void GC_free_all(void)
 {
   NewGC *gc = GC_get_GC();
-  int i;
+  int i, ty;
   mpage *work;
   mpage *next;
   PageMap pagemap = gc->page_maps;
@@ -5428,16 +5452,27 @@ void GC_free_all(void)
   gen0_free_entire_nursery(gc);
   gen_half_free_entire_nursery(gc);
-  for(i = 0; i < PAGE_TYPES; i++) {
+  for (i = 0; i < PAGE_TYPES; i++) {
     for (work = gc->gen1_pages[i]; work; work = next) {
       next = work->next;
       if (work->mprotected)
-      {
         mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work));
-      }
       GCVERBOSEPAGE(gc, "Cleaning up GC DYING", work);
       gen1_free_mpage(pagemap, work);
+      --gc->num_gen1_pages;
+    }
+  }
+  for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
+    for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
+      for (work = gc->med_pages[ty][i]; work; work = next) {
+        next = work->next;
+        if (work->mprotected)
+          mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work));
+        GCVERBOSEPAGE(gc, "Cleaning up GC DYING", work);
+        gen1_free_mpage(pagemap, work);
+        --gc->num_gen1_pages;
+      }
     }
   }
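The free_child_gc and GC_free_all hunks above also cover the second half of the commit message: when a place terminates, its medium pages are now unprotected and freed along with the gen1 page lists. A condensed sketch of that cleanup, with stand-in sizes and helper stubs so it compiles on its own (the real code goes through gc->mmu, mmu_write_unprotect_page, gen1_free_mpage, and the page map):

    #include <stdint.h>

    /* Stand-in sizes and types for this sketch only. */
    #define MED_PAGE_TYPES 2        /* placeholder value */
    #define NUM_MED_PAGE_SIZES 10   /* placeholder value */

    typedef struct mpage {
      struct mpage *next;
      void *addr;
      int mprotected;
    } mpage;

    typedef struct NewGC {
      mpage *med_pages[MED_PAGE_TYPES][NUM_MED_PAGE_SIZES];
      intptr_t num_gen1_pages;
    } NewGC;

    /* Stubs standing in for mmu_write_unprotect_page() and gen1_free_mpage(). */
    static void unprotect_page(void *addr) { (void)addr; }
    static void free_mpage(mpage *work) { (void)work; }

    /* Free every medium page of a dying GC instance, mirroring the loops the
       commit adds to free_child_gc and GC_free_all. */
    static void free_place_medium_pages(NewGC *gc)
    {
      int ty, i;
      mpage *work, *next;

      for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
        for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
          for (work = gc->med_pages[ty][i]; work; work = next) {
            next = work->next;            /* grab before the record is freed */
            if (work->mprotected)
              unprotect_page(work->addr); /* re-enable writes before release */
            free_mpage(work);
            --gc->num_gen1_pages;         /* keep the gen1 page count in sync */
          }
        }
      }
    }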


@@ -13,8 +13,18 @@ typedef struct mpage {
   struct mpage *next;
   struct mpage *prev;
   void *addr;
+  void *mmu_src_block;
+#ifdef MZ_GC_BACKTRACE
+  void **backtrace;
+  void *backtrace_page_src;
+#endif
+#ifdef MZ_USE_PLACES
+  uintptr_t page_lock; /* for master GC pages during marking */
+#endif
   uintptr_t previous_size; /* for med page, place to search for available block; for jit nursery, allocated size */
   uintptr_t size; /* big page size, med page element size, or nursery starting point */
+  struct mpage *backpointer_next;
+  unsigned short live_size;
   unsigned char generation :2;
   unsigned char back_pointers :1;
   unsigned char size_class :2; /* 0 => small; 1 => med; 2 => big; 3 => big marked */
@@ -23,15 +33,6 @@ typedef struct mpage {
   unsigned char marked_from :1;
   unsigned char has_new :1;
   unsigned char mprotected :1;
-  unsigned short live_size;
-#ifdef MZ_GC_BACKTRACE
-  void **backtrace;
-  void *backtrace_page_src;
-#endif
-  void *mmu_src_block;
-#ifdef MZ_USE_PLACES
-  uintptr_t page_lock; /* for master GC pages during marking */
-#endif
 } mpage;
 typedef struct Gen0 {
@@ -138,6 +139,12 @@ typedef struct NewGC {
   struct mpage *med_pages[MED_PAGE_TYPES][NUM_MED_PAGE_SIZES];
   struct mpage *med_freelist_pages[MED_PAGE_TYPES][NUM_MED_PAGE_SIZES];
+  intptr_t num_gen1_pages;
+  /* linked list of pages with back pointers to be traversed in a
+     minor collection: */
+  struct mpage *backpointer_next;
   MarkSegment *mark_stack;
   /* Finalization */