better GC support for medium-sized immobile objects

svn: r14560
Matthew Flatt 2009-04-19 15:47:52 +00:00
parent 434ec53b88
commit 5a4f15f5f9
4 changed files with 507 additions and 204 deletions
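In outline, as read from the diff below: the boolean big_page flag on mpage becomes a small size_class field (0 = small page, 1 = medium, 2 = big, 3 = big and marked); a new allocate_medium() carves dedicated pages into equal-size, power-of-two-bucketed blocks tracked by the new med_pages[] and med_freelist_pages[] arrays; and GC_malloc_allow_interior / GC_malloc_tagged_allow_interior are redirected from allocate_big() to allocate_medium(), so medium-sized immobile objects no longer take a full page each.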

View File

@@ -239,7 +239,9 @@ inline static unsigned long custodian_usage(NewGC*gc, void *custodian)
inline static void BTC_memory_account_mark(NewGC *gc, mpage *page, void *ptr)
{
GCDEBUG((DEBUGOUTF, "BTC_memory_account_mark: %p/%p\n", page, ptr));
if(page->big_page) {
if(page->size_class) {
if(page->size_class > 1) {
/* big page */
struct objhead *info = (struct objhead *)(NUM(page->addr) + PREFIX_SIZE);
if(info->btc_mark == gc->old_btc_mark) {
@@ -247,6 +249,17 @@ inline static void BTC_memory_account_mark(NewGC *gc, mpage *page, void *ptr)
account_memory(gc, gc->current_mark_owner, gcBYTES_TO_WORDS(page->size));
push_ptr(ptr);
}
} else {
/* medium page */
struct objhead *info = MED_OBJHEAD(ptr, page->size);
if(info->btc_mark == gc->old_btc_mark) {
info->btc_mark = gc->new_btc_mark;
account_memory(gc, gc->current_mark_owner, info->size);
ptr = PTR(NUM(info) + WORD_SIZE);
push_ptr(ptr);
}
}
} else {
struct objhead *info = (struct objhead *)((char*)ptr - WORD_SIZE);
@@ -315,9 +328,9 @@ int BTC_cust_box_mark(void *p)
return gc->mark_table[btc_redirect_cust_box](p);
}
inline static void mark_normal_obj(NewGC *gc, mpage *page, void *ptr)
inline static void mark_normal_obj(NewGC *gc, int type, void *ptr)
{
switch(page->page_type) {
switch(type) {
case PAGE_TAGGED: {
/* we do not want to mark the pointers in a thread or custodian
unless the object's owner is the current owner. In the case
@@ -374,7 +387,6 @@ inline static void mark_acc_big_page(NewGC *gc, mpage *page)
}
}
static void btc_overmem_abort(NewGC *gc)
{
gc->kill_propagation_loop = 1;
@@ -391,10 +403,16 @@ static void propagate_accounting_marks(NewGC *gc)
page = pagemap_find_page(pagemap, p);
set_backtrace_source(p, page->page_type);
GCDEBUG((DEBUGOUTF, "btc_account: popped off page %p:%p, ptr %p\n", page, page->addr, p));
if(page->big_page)
if(page->size_class) {
if (page->size_class > 1)
mark_acc_big_page(gc, page);
else
mark_normal_obj(gc, page, p);
else {
struct objhead *info = MED_OBJHEAD(p, page->size);
p = PTR(NUM(info) + WORD_SIZE);
mark_normal_obj(gc, info->type, p);
}
} else
mark_normal_obj(gc, page->page_type, p);
}
if(gc->kill_propagation_loop)
reset_pointer_stack();

View File

@@ -91,7 +91,6 @@ inline static int is_master_gc(NewGC *gc) {
return (MASTERGC == gc);
}
#include "msgprint.c"
/*****************************************************************************/
@@ -364,7 +363,7 @@ inline static void pagemap_modify_with_size(PageMap pagemap, mpage *page, long s
}
inline static void pagemap_modify(PageMap pagemap, mpage *page, mpage *val) {
long size = page->big_page ? page->size : APAGE_SIZE;
long size = (page->size_class > 1) ? page->size : APAGE_SIZE;
pagemap_modify_with_size(pagemap, page, size, val);
}
@@ -420,6 +419,8 @@ int GC_is_allocated(void *p)
#endif
#define PREFIX_SIZE (PREFIX_WSIZE * WORD_SIZE)
#define MED_OBJHEAD(p, bytesize) ((struct objhead *)(PTR(((((NUM(p) & (APAGE_SIZE - 1)) - PREFIX_SIZE) / bytesize) * bytesize) \
+ (NUM(p) & (~(APAGE_SIZE - 1))) + PREFIX_SIZE)))
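Since every block on a medium page has the same size, MED_OBJHEAD can recover a block's header from any interior pointer by rounding the in-page offset down to a block boundary. A self-contained sketch of that arithmetic (the 16 KB page and 8-byte prefix are demo assumptions, not values taken from this build):

#include <stdio.h>
#include <stdint.h>

#define DEMO_APAGE_SIZE 0x4000UL   /* assumed page size, a power of two */
#define DEMO_PREFIX_SIZE 8UL       /* assumed page-prefix size */

static uintptr_t med_block_header(uintptr_t p, uintptr_t bytesize)
{
  uintptr_t page_base = p & ~(DEMO_APAGE_SIZE - 1);              /* page start */
  uintptr_t offset = (p & (DEMO_APAGE_SIZE - 1)) - DEMO_PREFIX_SIZE;
  offset = (offset / bytesize) * bytesize;       /* round down to a block */
  return page_base + DEMO_PREFIX_SIZE + offset;  /* block's objhead address */
}

int main(void)
{
  /* a pointer 500 bytes into a page of 144-byte blocks maps to the
     header of block 3, at prefix + 3*144 = byte 440 of the page */
  printf("offset %lu\n",
         (unsigned long)(med_block_header(0x10000 + 500, 144) - 0x10000));
  return 0;
}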
/* this is the maximum size of an object that will fit on a page, in words.
the "- 3" is basically used as a fudge/safety factor, and has no real,
@@ -437,7 +438,7 @@ int GC_is_allocated(void *p)
gc->gen0.curr_alloc_page is the member of this list we are currently allocating on.
The size count helps us trigger collection quickly when we're running out of space; see
the test in allocate_big.
*/
*/
THREAD_LOCAL unsigned long GC_gen0_alloc_page_ptr = 0;
THREAD_LOCAL unsigned long GC_gen0_alloc_page_end = 0;
@@ -511,7 +512,7 @@ static void *allocate_big(size_t sizeb, int type)
addr = malloc_pages(gc, round_to_apage_size(sizeb), APAGE_SIZE);
bpage->addr = addr;
bpage->size = sizeb;
bpage->big_page = 1;
bpage->size_class = 2;
bpage->page_type = type;
/* push new bpage onto GC->gen0.big_pages */
@@ -554,12 +555,86 @@ static void *allocate_big(size_t sizeb, int type)
# endif
#endif
static void *allocate_medium(size_t sizeb, int type)
{
NewGC *gc;
int sz = 8, pos = 0, n;
void *addr, *p;
struct mpage *page;
struct objhead *info;
if (sizeb > (1 << (LOG_APAGE_SIZE - 1)))
return allocate_big(sizeb, type);
while (sz < sizeb) {
sz <<= 1;
pos++;
}
sz += WORD_SIZE; /* add trailing word, in case pointer is to end */
sz += WORD_SIZE; /* room for objhead */
sz = ALIGN_BYTES_SIZE(sz);
gc = GC_get_GC();
while (1) {
page = gc->med_freelist_pages[pos];
if (page) {
n = page->previous_size;
while (n < APAGE_SIZE) {
info = (struct objhead *)PTR(NUM(page->addr) + n);
if (info->dead) {
info->dead = 0;
info->type = type;
page->previous_size = (n + sz);
page->live_size += sz;
p = PTR(NUM(info) + WORD_SIZE);
memset(p, 0, sz - WORD_SIZE);
return p;
}
n += sz;
}
gc->med_freelist_pages[pos] = page->prev;
} else
break;
}
page = malloc_mpage();
addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE);
page->addr = addr;
page->size = sz;
page->size_class = 1;
page->page_type = PAGE_BIG;
page->previous_size = PREFIX_SIZE;
page->live_size = sz;
for (n = page->previous_size; (n + sz) <= APAGE_SIZE; n += sz) {
info = (struct objhead *)PTR(NUM(page->addr) + n);
info->dead = 1;
info->size = gcBYTES_TO_WORDS(sz);
}
page->next = gc->med_pages[pos];
if (page->next)
page->next->prev = page;
gc->med_pages[pos] = page;
gc->med_freelist_pages[pos] = page;
pagemap_add(gc->page_maps, page);
n = page->previous_size;
info = (struct objhead *)PTR(NUM(page->addr) + n);
info->dead = 0;
info->type = type;
return PTR(NUM(info) + WORD_SIZE);
}
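The sizing logic above rounds each request up to a power-of-two bucket starting at 8 bytes (pos indexes med_pages[]), then pads with one trailing word plus one objhead word; anything over half a page is left to allocate_big. A runnable sketch of just the size computation (the 8-byte word and 16-byte alignment are demo assumptions standing in for WORD_SIZE and ALIGN_BYTES_SIZE):

#include <stdio.h>

#define DEMO_WORD_SIZE 8                    /* assumed word size */
#define DEMO_ALIGN(x) (((x) + 15) & ~15)    /* assumed ALIGN_BYTES_SIZE */

static int demo_block_size(size_t sizeb, int *pos)
{
  int sz = 8;                  /* smallest bucket: 2^3 bytes */
  *pos = 0;
  while (sz < (int)sizeb) {    /* round the request up to a power of two */
    sz <<= 1;
    (*pos)++;
  }
  sz += DEMO_WORD_SIZE;        /* trailing word, in case a pointer is to the end */
  sz += DEMO_WORD_SIZE;        /* room for the objhead */
  return DEMO_ALIGN(sz);
}

int main(void)
{
  int pos, sz = demo_block_size(100, &pos);
  printf("pos=%d sz=%d\n", pos, sz);   /* prints pos=4 sz=144 */
  return 0;
}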
inline static struct mpage *gen0_create_new_mpage(NewGC *gc) {
mpage *newmpage;
newmpage = malloc_mpage(gc);
newmpage->addr = malloc_dirty_pages(gc, GEN0_PAGE_SIZE, APAGE_SIZE);
newmpage->big_page = 0;
newmpage->size_class = 0;
newmpage->size = PREFIX_SIZE;
pagemap_add_with_size(gc->page_maps, newmpage, GEN0_PAGE_SIZE);
@@ -721,9 +796,9 @@ void *GC_malloc_one_xtagged(size_t s) { return allocate(s, PAGE_XTAG
void *GC_malloc_array_tagged(size_t s) { return allocate(s, PAGE_TARRAY); }
void *GC_malloc_atomic(size_t s) { return allocate(s, PAGE_ATOMIC); }
void *GC_malloc_atomic_uncollectable(size_t s) { void *p = ofm_malloc_zero(s); return p; }
void *GC_malloc_allow_interior(size_t s) { return allocate_big(s, PAGE_ARRAY); }
void *GC_malloc_allow_interior(size_t s) { return allocate_medium(s, PAGE_ARRAY); }
void *GC_malloc_atomic_allow_interior(size_t s) { return allocate_big(s, PAGE_ATOMIC); }
void *GC_malloc_tagged_allow_interior(size_t s) { return allocate_big(s, PAGE_TAGGED); }
void *GC_malloc_tagged_allow_interior(size_t s) { return allocate_medium(s, PAGE_TAGGED); }
void *GC_malloc_one_small_dirty_tagged(size_t s) { return fast_malloc_one_small_tagged(s, 1); }
void *GC_malloc_one_small_tagged(size_t s) { return fast_malloc_one_small_tagged(s, 0); }
void GC_free(void *p) {}
@@ -822,14 +897,21 @@ inline static void reset_nursery(NewGC *gc)
false if it isn't. This function assumes that you're talking, at this
point, purely about the mark field of the object. It ignores things like
the object not being one of our GC heap objects, being in a higher gen
than we're collectiong, not being a pointer at all, etc. */
than we're collecting, not being a pointer at all, etc. */
inline static int marked(NewGC *gc, void *p)
{
struct mpage *page;
if(!p) return 0;
if(!(page = pagemap_find_page(gc->page_maps, p))) return 1;
if((NUM(page->addr) + page->previous_size) > NUM(p)) return 1;
if (page->size_class) {
if (page->size_class > 1) {
return (page->size_class > 2);
}
} else {
if((NUM(page->addr) + page->previous_size) > NUM(p))
return 1;
}
return ((struct objhead *)(NUM(p) - WORD_SIZE))->mark;
}
@@ -953,7 +1035,7 @@ static void set_backtrace_source(void *source, int type)
}
static void record_backtrace(struct mpage *page, void *ptr)
/* ptr is after objhead */
/* ptr is after objhead */
{
unsigned long delta;
@@ -976,12 +1058,16 @@ static void copy_backtrace_source(struct mpage *to_page, void *to_ptr,
}
static void *get_backtrace(struct mpage *page, void *ptr)
/* ptr is after objhead */
/* ptr is after objhead */
{
unsigned long delta;
if (page->big_page)
if (page->size_class) {
if (page->size_class > 1)
ptr = PTR((char *)page->addr + PREFIX_SIZE + WORD_SIZE);
else
ptr = (char *)MED_OBJHEAD(ptr, page->size) + WORD_SIZE;
}
delta = PPTR(ptr) - PPTR(page->addr);
return page->backtrace[delta - 1];
@@ -1082,7 +1168,7 @@ static inline void *get_stack_base(NewGC *gc) {
} \
} \
} \
}
}
inline static void mark_roots(NewGC *gc)
{
@@ -1432,7 +1518,7 @@ static int designate_modified_gc(NewGC *gc, void *p)
if(page) {
if (!page->back_pointers) {
page->mprotected = 0;
vm_protect_pages(page->addr, page->big_page ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
vm_protect_pages(page->addr, (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
page->back_pointers = 1;
return 1;
}
@@ -1655,15 +1741,16 @@ void GC_mark(const void *const_p)
#endif
}
if(page->big_page) {
if(page->size_class) {
if(page->size_class > 1) {
/* This is a bigpage. The first thing we do is see if it's been marked
previously */
if(page->big_page != 1) {
if(page->size_class != 2) {
GCDEBUG((DEBUGOUTF, "Not marking %p on big %p (already marked)\n", p, page));
return;
}
/* in this case, it has not. So we want to mark it, first off. */
page->big_page = 2;
page->size_class = 3;
/* if this is in the nursery, we want to move it out of the nursery */
if(!page->generation) {
@@ -1695,8 +1782,20 @@ void GC_mark(const void *const_p)
/* Finally, we want to add this to our mark queue, so we can
propagate its pointers */
push_ptr(p);
} else {
/* A medium page. */
struct objhead *info = MED_OBJHEAD(p, page->size);
if (info->mark) {
GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
return;
}
else {
info->mark = 1;
page->marked_on = 1;
p = PTR(NUM(info) + WORD_SIZE);
record_backtrace(page, p);
push_ptr(p);
}
} else {
struct objhead *ohead = (struct objhead *)(NUM(p) - WORD_SIZE);
if(ohead->mark) {
@@ -1820,7 +1919,8 @@ static void propagate_marks(NewGC *gc)
/* we can assume a lot here -- like it's a valid pointer with a page --
because we vet bad cases out in GC_mark, above */
if(page->big_page) {
if(page->size_class) {
if(page->size_class > 1) {
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE);
void **end = PPTR(NUM(page->addr) + page->size);
@@ -1840,7 +1940,8 @@ static void propagate_marks(NewGC *gc)
case PAGE_ATOMIC: break;
case PAGE_ARRAY: while(start < end) gcMARK(*(start++)); break;
case PAGE_XTAGGED: GC_mark_xtagged(start); break;
case PAGE_TARRAY: {
case PAGE_TARRAY:
{
unsigned short tag = *(unsigned short *)start;
end -= INSET_WORDS;
while(start < end) {
@ -1850,6 +1951,29 @@ static void propagate_marks(NewGC *gc)
break;
}
}
} else {
/* Medium page */
struct objhead *info = (struct objhead *)(NUM(p) - WORD_SIZE);
set_backtrace_source(p, info->type);
switch(info->type) {
case PAGE_TAGGED:
{
unsigned short tag = *(unsigned short*)p;
GC_ASSERT(mark_table[tag]);
mark_table[tag](p);
break;
}
case PAGE_ARRAY:
{
void **start = p;
void **end = PPTR(info) + info->size;
while(start < end) gcMARK(*start++);
break;
}
}
}
} else {
struct objhead *info = (struct objhead *)(NUM(p) - WORD_SIZE);
@@ -1892,7 +2016,7 @@ void *GC_resolve(void *p)
struct mpage *page = pagemap_find_page(gc->page_maps, p);
struct objhead *info;
if(!page || page->big_page)
if(!page || page->size_class)
return p;
info = (struct objhead *)(NUM(p) - WORD_SIZE);
@@ -1920,7 +2044,7 @@ void GC_fixup(void *pp)
if((page = pagemap_find_page(gc->page_maps, p))) {
struct objhead *info;
if(page->big_page) return;
if(page->size_class) return;
info = (struct objhead *)(NUM(p) - WORD_SIZE);
if(info->mark && info->moved)
*(void**)pp = *(void**)p;
@@ -1935,12 +2059,15 @@ void GC_fixup(void *pp)
#ifdef MZ_GC_BACKTRACE
# define trace_page_t struct mpage
# define trace_page_type(page) (page)->page_type
static void *trace_pointer_start(struct mpage *page, void *p) {
if (page->big_page)
static void *trace_pointer_start(struct mpage *page, void *p) {
if (page->size_class) {
if (page->size_class > 1)
return PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE);
else
return PTR(NUM(MED_OBJHEAD(p, page->size)) + WORD_SIZE);
} else
return p;
}
}
# define TRACE_PAGE_TAGGED PAGE_TAGGED
# define TRACE_PAGE_ARRAY PAGE_ARRAY
# define TRACE_PAGE_TAGGED_ARRAY PAGE_TARRAY
@@ -1948,7 +2075,7 @@ void GC_fixup(void *pp)
# define TRACE_PAGE_XTAGGED PAGE_XTAGGED
# define TRACE_PAGE_MALLOCFREE PAGE_TYPES
# define TRACE_PAGE_BAD PAGE_TYPES
# define trace_page_is_big(page) (page)->big_page
# define trace_page_is_big(page) (page)->size_class
# define trace_backpointer get_backtrace
# include "backtrace.c"
#else
@@ -2098,49 +2225,81 @@ void *GC_next_tagged_start(void *p)
/* garbage collection */
/*****************************************************************************/
static void reset_gen1_page(NewGC *gc, mpage *work)
{
if (gc->generations_available && work->mprotected) {
work->mprotected = 0;
add_protect_page_range(gc->protect_range, work->addr,
(work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE,
APAGE_SIZE, 1);
}
}
static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc)
{
Page_Range *protect_range = gc->protect_range;
mpage *work;
int i;
GCDEBUG((DEBUGOUTF, "MAJOR COLLECTION - PREPPING PAGES - reset live_size, reset previous_size, unprotect.\n"));
/* we need to make sure that previous_size for every page is reset, so
we don't accidentally screw up the mark routine */
for(i = 0; i < PAGE_TYPES; i++) {
for(work = gc->gen1_pages[i]; work; work = work->next) {
if (gc->generations_available && work->mprotected) {
work->mprotected = 0;
add_protect_page_range(protect_range, work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, APAGE_SIZE, 1);
}
reset_gen1_page(gc, work);
work->live_size = 0;
work->previous_size = PREFIX_SIZE;
}
}
flush_protect_page_ranges(protect_range, 1);
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
for (work = gc->med_pages[i]; work; work = work->next) {
if (work->generation) {
reset_gen1_page(gc, work);
}
}
}
flush_protect_page_ranges(gc->protect_range, 1);
}
static void remove_gen1_page_from_pagemap(NewGC *gc, mpage *work)
{
if (gc->generations_available && work->back_pointers && work->mprotected) {
work->mprotected = 0;
add_protect_page_range(gc->protect_range, work->addr,
(work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE,
APAGE_SIZE, 1);
}
pagemap_remove(gc->page_maps, work);
work->added = 0;
}
static void remove_all_gen1_pages_from_pagemap(NewGC *gc)
{
Page_Range *protect_range = gc->protect_range;
PageMap pagemap = gc->page_maps;
mpage *work;
int i;
GCDEBUG((DEBUGOUTF, "MINOR COLLECTION - PREPPING PAGES - remove all gen1 pages from pagemap.\n"));
/* if we're not doing a major collection, then we need to remove all the
pages in gc->gen1_pages[] from the page map */
for(i = 0; i < PAGE_TYPES; i++) {
for(work = gc->gen1_pages[i]; work; work = work->next) {
if (gc->generations_available && work->back_pointers && work->mprotected) {
work->mprotected = 0;
add_protect_page_range(protect_range, work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, APAGE_SIZE, 1);
}
pagemap_remove(pagemap, work);
work->added = 0;
remove_gen1_page_from_pagemap(gc, work);
}
}
flush_protect_page_ranges(protect_range, 1);
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
for (work = gc->med_pages[i]; work; work = work->next) {
if (work->generation) {
remove_gen1_page_from_pagemap(gc, work);
}
}
}
flush_protect_page_ranges(gc->protect_range, 1);
}
static void mark_backpointers(NewGC *gc)
@@ -2151,7 +2310,7 @@ static void mark_backpointers(NewGC *gc)
PageMap pagemap = gc->page_maps;
/* if this is not a full collection, then we need to mark any pointers
which point backwards into generation 0, since they're roots. */
that point backwards into generation 0, since they're roots. */
for(i = 0; i < PAGE_TYPES; i++) {
for(work = gc->gen1_pages[i]; work; work = work->next) {
if(work->back_pointers) {
@@ -2160,8 +2319,9 @@ static void mark_backpointers(NewGC *gc)
work->marked_on = 1;
work->previous_size = PREFIX_SIZE;
pagemap_add(pagemap, work);
if(work->big_page) {
work->big_page = 2;
if(work->size_class) {
/* must be a big page */
work->size_class = 3;
push_ptr(PPTR(NUM(work->addr) + PREFIX_SIZE + sizeof(struct objhead)));
} else {
if(work->page_type != PAGE_ATOMIC) {
@@ -2190,6 +2350,28 @@
}
}
}
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
for (work = gc->med_pages[i]; work; work = work->next) {
if(work->back_pointers) {
void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
work->marked_on = 1;
pagemap_add(pagemap, work);
while(start <= end) {
struct objhead *info = (struct objhead *)start;
if(!info->dead) {
info->mark = 1;
/* This must be a push_ptr (see below) */
push_ptr(start + 1);
}
start += info->size;
}
}
}
}
}
}
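The loop just above is the medium-page traversal idiom that recurs through the rest of this diff: each block begins with an objhead whose size field is in words, so stepping a word pointer by info->size lands on the next header, and payloads sit one word past each header. A toy, self-contained version (the plain-int header is a stand-in for the real bit-field objhead; page and block sizes are demo values):

#include <stdio.h>
#include <string.h>

#define PAGE_WORDS 32    /* demo "page" of 32 pointer-sized words */
#define BLOCK_WORDS 4    /* demo block size: 4 words, header included */

struct demo_objhead { unsigned dead; unsigned size; };  /* size in words */

int main(void)
{
  void *page[PAGE_WORDS];
  void **start, **end = page + PAGE_WORDS - BLOCK_WORDS;
  memset(page, 0, sizeof(page));
  /* lay out headers every BLOCK_WORDS words, as allocate_medium does */
  for (start = page; start <= end; start += BLOCK_WORDS) {
    struct demo_objhead *info = (struct demo_objhead *)start;
    info->dead = (start != page);   /* pretend only block 0 is live */
    info->size = BLOCK_WORDS;
  }
  /* the walk, same shape as the mark_backpointers loop above */
  for (start = page; start <= end; ) {
    struct demo_objhead *info = (struct demo_objhead *)start;
    if (!info->dead)
      printf("live block at word %d, payload at word %d\n",
             (int)(start - page), (int)(start - page) + 1);
    start += info->size;
  }
  return 0;
}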
@@ -2202,7 +2384,7 @@ struct mpage *allocate_compact_target(NewGC *gc, mpage *work)
npage->previous_size = npage->size = PREFIX_SIZE;
npage->generation = 1;
npage->back_pointers = 0;
npage->big_page = 0;
npage->size_class = 0;
npage->page_type = work->page_type;
npage->marked_on = 1;
backtrace_new_page(gc, npage);
@@ -2324,13 +2506,14 @@ static void repair_heap(NewGC *gc)
if(page->marked_on) {
page->has_new = 0;
/* these are guaranteed not to be protected */
if(page->big_page) {
if(page->size_class) {
/* since we get here via gen1_pages, it's a big page */
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE);
void **end = PPTR(NUM(page->addr) + page->size);
GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
page, start));
page->big_page = 1; /* remove the mark */
page->size_class = 2; /* remove the mark */
switch(page->page_type) {
case PAGE_TAGGED:
fixup_table[*(unsigned short*)start](start);
@@ -2423,10 +2606,43 @@ static void repair_heap(NewGC *gc)
} else GCDEBUG((DEBUGOUTF,"Not Cleaning page %p\n", page));
}
}
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
for (page = gc->med_pages[i]; page; page = page->next) {
if (page->marked_on) {
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
while(start < end) {
struct objhead *info = (struct objhead *)start;
if(info->mark) {
switch(info->type) {
case PAGE_ARRAY:
{
void **tempend = (start++) + info->size;
while(start < tempend) gcFIXUP(*start++);
}
break;
case PAGE_TAGGED:
{
fixup_table[*(unsigned short*)(start+1)](start+1);
start += info->size;
}
break;
}
info->mark = 0;
} else {
info->dead = 1;
start += info->size;
}
}
}
}
}
}
static inline void gen1_free_mpage(PageMap pagemap, mpage *page) {
size_t real_page_size = page->big_page ? round_to_apage_size(page->size) : APAGE_SIZE;
size_t real_page_size = (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE;
pagemap_remove(pagemap, page);
free_backtrace(page);
free_pages(GC, page->addr, real_page_size);
@@ -2496,6 +2712,61 @@ static void clean_up_heap(NewGC *gc)
}
}
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
mpage *work;
mpage *prev = NULL, *next;
for (work = gc->med_pages[i]; work; work = next) {
if (work->marked_on) {
void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
int non_dead = 0;
while(start <= end) {
struct objhead *info = (struct objhead *)start;
if (!info->dead) {
non_dead++;
}
start += info->size;
}
next = work->next;
if (non_dead) {
work->live_size = (work->size * non_dead);
memory_in_use += work->live_size;
work->previous_size = PREFIX_SIZE;
work->back_pointers = work->marked_on = 0;
work->generation = 1;
pagemap_add(pagemap, work);
prev = work;
} else {
/* free the page */
if(prev) prev->next = next; else gc->med_pages[i] = next;
if(next) work->next->prev = prev;
if (work->mprotected) *(long *)0x0 = 1;
gen1_free_mpage(pagemap, work);
}
} else if (gc->gc_full || !work->generation) {
/* Page wasn't touched in full GC, or gen-0 not touched,
so we can free it. */
next = work->next;
if(prev) prev->next = next; else gc->med_pages[i] = next;
if(next) work->next->prev = prev;
if (work->mprotected) *(long *)0x0 = 1;
gen1_free_mpage(pagemap, work);
} else {
/* not touched during minor gc */
memory_in_use += work->live_size;
work->previous_size = PREFIX_SIZE;
next = work->next;
prev = work;
work->back_pointers = 0;
pagemap_add(pagemap, work);
}
}
gc->med_freelist_pages[i] = prev;
}
gc->memory_in_use = memory_in_use;
cleanup_vacated_pages(gc);
}
@@ -2506,7 +2777,7 @@ static void protect_old_pages(NewGC *gc)
struct mpage *page;
int i;
for(i = 0; i < PAGE_TYPES; i++)
for(i = 0; i < PAGE_TYPES; i++) {
if(i != PAGE_ATOMIC)
for(page = gc->gen1_pages[i]; page; page = page->next)
if(page->page_type != PAGE_ATOMIC) {
@@ -2515,6 +2786,16 @@ static void protect_old_pages(NewGC *gc)
add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 0);
}
}
}
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
for (page = gc->med_pages[i]; page; page = page->next) {
if (!page->mprotected) {
page->mprotected = 1;
add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 0);
}
}
}
flush_protect_page_ranges(protect_range, 0);
}
@@ -2822,7 +3103,7 @@ void GC_free_all(void)
next = work->next;
if (work->mprotected)
vm_protect_pages(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
vm_protect_pages(work->addr, (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
gen1_free_mpage(pagemap, work);
}
}

View File

@@ -5,8 +5,8 @@ typedef struct mpage {
struct mpage *next;
struct mpage *prev;
void *addr;
unsigned long previous_size;
unsigned long size;
unsigned long previous_size; /* for med page, points to place to search for available block */
unsigned long size; /* big page size or med page element size */
unsigned char generation;
/*
unsigned char back_pointers :1;
@@ -17,7 +17,7 @@ typedef struct mpage {
unsigned char mprotected :1;
*/
unsigned char back_pointers ;
unsigned char big_page ;
unsigned char size_class ; /* 1 => med; 2 => big; 3 => big marked */
unsigned char page_type ;
unsigned char marked_on ;
unsigned char has_new ;
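Read together with the tests used throughout newgc.c above (size_class, size_class > 1, size_class > 2), the new field encodes four states; an illustrative enum, not part of the source:

enum demo_size_class {
  SIZE_CLASS_SMALL      = 0,  /* ordinary small-object page */
  SIZE_CLASS_MED        = 1,  /* medium page of equal-size blocks */
  SIZE_CLASS_BIG        = 2,  /* big page, not marked this collection */
  SIZE_CLASS_BIG_MARKED = 3   /* big page, marked this collection */
};
/* if(page->size_class)      -> medium or big          */
/* if(page->size_class > 1)  -> big (marked or not)    */
/* if(page->size_class > 2)  -> big and already marked */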
@@ -92,6 +92,8 @@ typedef mpage ****PageMap;
typedef mpage **PageMap;
#endif
#define NUM_MED_PAGE_SIZES (((LOG_APAGE_SIZE - 1) - 3) + 1)
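This count matches the sizing in allocate_medium: buckets run from 2^3 bytes up to 2^(LOG_APAGE_SIZE - 1) bytes, i.e. half a page, the point past which allocate_big takes over, so there are (LOG_APAGE_SIZE - 1) - 3 + 1 of them. If LOG_APAGE_SIZE were 14 (16 KB pages, an assumption rather than a value visible in this diff), that is 11 buckets covering 8 bytes through 8 KB.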
typedef struct NewGC {
Gen0 gen0;
Mark_Proc *mark_table; /* the table of mark procs */
@@ -101,6 +103,8 @@ typedef struct NewGC {
struct mpage *gen1_pages[PAGE_TYPES];
Page_Range *protect_range;
struct mpage *med_pages[NUM_MED_PAGE_SIZES];
struct mpage *med_freelist_pages[NUM_MED_PAGE_SIZES];
/* Finalization */
Fnl *run_queue;

View File

@@ -1140,7 +1140,7 @@ typedef struct Scheme_Cont_Mark_Set {
Scheme_Object *native_stack_trace;
} Scheme_Cont_Mark_Set;
#define SCHEME_LOG_MARK_SEGMENT_SIZE 8
#define SCHEME_LOG_MARK_SEGMENT_SIZE 6
#define SCHEME_MARK_SEGMENT_SIZE (1 << SCHEME_LOG_MARK_SEGMENT_SIZE)
#define SCHEME_MARK_SEGMENT_MASK (SCHEME_MARK_SEGMENT_SIZE - 1)
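For scale: this shrinks a continuation-mark segment from 1 << 8 = 256 entries to 1 << 6 = 64. Plausibly the point is to keep each segment small enough to come from the new medium-object path behind the *_allow_interior entry points, rather than claiming a whole big page, though the diff itself does not say so.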