[Places] MasterGC allocation and collection; place_wait; master initiated child collection

svn: r17139
This commit is contained in:
Kevin Tew 2009-12-01 17:26:45 +00:00
parent 86697e8555
commit 788d913bba
9 changed files with 529 additions and 52 deletions

View File

@ -1,4 +1,3 @@
#ifndef __mzscheme_gc_2__
#define __mzscheme_gc_2__
@ -419,6 +418,16 @@ GC2_EXTERN unsigned long GC_make_jit_nursery_page();
with the next GC.
*/
GC2_EXTERN void GC_check_master_gc_request();
/*
Checks to see if the master has requested a places major GC run
and executes a GC if requested
*/
GC2_EXTERN void GC_set_put_external_event_fd(void *fd);
/*
Sets the fd that can be passed to scheme_signal_received_at to wake up the place for GC
*/
# ifdef __cplusplus
};

View File

@ -58,6 +58,12 @@
# define GC_ASSERT(x) /* empty */
#endif
#if 0
# define GC_LOCK_DEBUG(args) printf(args)
#else
# define GC_LOCK_DEBUG(args) /* empty */
#endif
/* the page type constants */
enum {
PAGE_TAGGED = 0,
@ -70,6 +76,12 @@ enum {
PAGE_TYPES = 6,
};
enum {
SIZE_CLASS_MED_PAGE = 1,
SIZE_CLASS_BIG_PAGE = 2,
SIZE_CLASS_BIG_PAGE_MARKED = 3,
};
static const char *type_name[PAGE_TYPES] = {
"tagged",
"atomic",
@ -81,21 +93,53 @@ static const char *type_name[PAGE_TYPES] = {
#include "newgc.h"
#ifdef MZ_USE_PLACES
static NewGC *MASTERGC;
static NewGCMasterInfo *MASTERGCINFO;
THREAD_LOCAL_DECL(static objhead GC_objhead_template);
/* True while the master GC has not been created yet, or when `gc`
   is the master GC itself. */
inline static int premaster_or_master_gc(NewGC *gc) {
  if (!MASTERGC)
    return 1;
  return gc == MASTERGC;
}
/* True while the master GC has not been created yet, or when `gc`
   is a place-local GC (anything but the master). */
inline static int premaster_or_place_gc(NewGC *gc) {
  if (!MASTERGC)
    return 1;
  return gc != MASTERGC;
}
/* True only after the master GC exists AND `gc` is the master. */
inline static int postmaster_and_master_gc(NewGC *gc) {
  if (!MASTERGC)
    return 0;
  return gc == MASTERGC;
}
/* True only after the master GC exists AND `gc` is a place-local GC. */
inline static int postmaster_and_place_gc(NewGC *gc) {
  if (!MASTERGC)
    return 0;
  return gc != MASTERGC;
}
/* File-scope handle for the GC debug log; opened lazily on first use. */
static FILE *GCVERBOSEFH;

/* Return the GC debug log stream, opening ./GCDEBUGOUT on the first call
   and flushing it on subsequent calls.  Falls back to stderr when the
   file cannot be created, so callers may always fprintf() to the result. */
static FILE* gcdebugOUT() {
  if (GCVERBOSEFH) {
    fflush(GCVERBOSEFH);
  }
  else {
    GCVERBOSEFH = fopen("GCDEBUGOUT", "w");
    if (!GCVERBOSEFH) {
      /* BUG FIX: fopen was unchecked; a NULL return would make every
         fprintf(gcdebugOUT(), ...) caller crash. */
      GCVERBOSEFH = stderr;
    }
  }
  return GCVERBOSEFH;
}
inline static size_t real_page_size(mpage* page);
#ifdef DEBUG_GC_PAGES
/* Log a page lifecycle event to the GC debug log as
   "<msg> <page struct> <start addr> <end addr>"; the end address is the
   start plus real_page_size(page).  Compiled only under DEBUG_GC_PAGES. */
static void GCVERBOSEPAGE(const char *msg, mpage* page) {
  fprintf(gcdebugOUT(), "%s %p %p %p\n", msg, page, page->addr, (void*)((long)page->addr + real_page_size(page)));
}
#else
#define GCVERBOSEPAGE(msg, page) /* EMPTY */
#endif
/* #define KILLING_DEBUG */
#ifdef KILLING_DEBUG
static void killing_debug(NewGC *gc, void *info);
static void marking_rmp_debug(NewGC *gc, void *info);
#endif
#else
#define GCVERBOSEPAGE(msg, page) /* EMPTY */
#endif
THREAD_LOCAL_DECL(static NewGC *GC);
#define GCTYPE NewGC
#define GC_get_GC() (GC)
#define GC_set_GC(gc) (GC = gc)
#ifdef MZ_USE_PLACES
/* Does `gc` refer to the shared master GC instance? */
inline static int is_master_gc(NewGC *gc) {
  return gc == MASTERGC;
}
#endif
#include "msgprint.c"
@ -538,7 +582,30 @@ static inline void* REMOVE_BIG_PAGE_PTR_TAG(void *p) {
return ((void *)((~((unsigned long) 1)) & ((unsigned long) p)));
}
/* Called from a place's scheduler loop: if the master GC has flagged a
   major places GC and this place's have_collected slot is non-zero,
   run a local collection now. */
void GC_check_master_gc_request() {
#ifdef MZ_USE_PLACES
  /* NOTE(review): Master_collect stores -1 (signaled) and places store 1
     (done) in have_collected; the `!= 0` test fires for both — confirm a
     place that already collected is meant to collect again here. */
  if (MASTERGC && MASTERGC->major_places_gc == 1 && MASTERGCINFO->have_collected[GC_objhead_template.owner] != 0) {
    GC_gcollect();
  }
#endif
}
/* Account `allocate_size` bytes against gen0's budget, collecting first
   when the budget would be exceeded.  The master GC never collects
   directly here: it only raises major_places_gc so the places collect
   first (the master's own collection is driven by Master_collect). */
static inline void gc_if_needed_account_alloc_size(NewGC *gc, size_t allocate_size) {
  if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
#ifdef MZ_USE_PLACES
    if (postmaster_and_master_gc(gc)) {
      /* defer: request a major GC from all places instead of collecting */
      MASTERGC->major_places_gc = 1;
    }
    else {
#endif
      if (!gc->dumping_avoid_collection)
        garbage_collect(gc, 0);
#ifdef MZ_USE_PLACES
    }
#endif
  }
  gc->gen0.current_size += allocate_size;
}
/* the core allocation functions */
static void *allocate_big(const size_t request_size_bytes, int type)
@ -550,6 +617,9 @@ static void *allocate_big(const size_t request_size_bytes, int type)
#ifdef NEWGC_BTC_ACCOUNT
if(GC_out_of_memory) {
#ifdef MZ_USE_PLACES
if (premaster_or_place_gc(gc)) {
#endif
if (BTC_single_allocation_limit(gc, request_size_bytes)) {
/* We're allowed to fail. Check for allocations that exceed a single-time
limit. Otherwise, the limit doesn't work as intended, because
@ -560,6 +630,9 @@ static void *allocate_big(const size_t request_size_bytes, int type)
is independent of any existing object, then we can enforce the limit. */
GC_out_of_memory();
}
#ifdef MZ_USE_PLACES
}
#endif
}
#endif
@ -569,10 +642,7 @@ static void *allocate_big(const size_t request_size_bytes, int type)
aligned for Sparcs. */
allocate_size = COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(request_size_bytes);
if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
if (!gc->dumping_avoid_collection)
garbage_collect(gc, 0);
}
gc_if_needed_account_alloc_size(gc, allocate_size);
/* The following allocations may fail and escape if GC_out_of_memory is set.
We not only need APAGE_SIZE alignment, we
@ -590,6 +660,7 @@ static void *allocate_big(const size_t request_size_bytes, int type)
bpage->size = allocate_size;
bpage->size_class = 2;
bpage->page_type = type;
GCVERBOSEPAGE("NEW BIG PAGE", bpage);
#ifdef MZ_USE_PLACES
memcpy(BIG_PAGE_TO_OBJHEAD(bpage), &GC_objhead_template, sizeof(objhead));
@ -619,6 +690,7 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p
page->page_type = PAGE_BIG;
page->previous_size = PREFIX_SIZE;
page->live_size = sz;
GCVERBOSEPAGE("NEW MED PAGE", page);
for (n = page->previous_size; ((n + sz) <= APAGE_SIZE); n += sz) {
objhead *info = (objhead *)PTR(NUM(page->addr) + n);
@ -663,6 +735,29 @@ inline static void *medium_page_realloc_dead_slot(NewGC *gc, const int sz, const
}
return 0;
}
#if defined(linux)
/* #define MASTER_ALLOC_DEBUG */
#if defined(MASTER_ALLOC_DEBUG)
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
/* Obtain a backtrace of the current call stack and print one frame per
   line to `file` (the original comment said stdout, but it always wrote
   to the given stream).  Linux/glibc only (MASTER_ALLOC_DEBUG). */
void print_libc_backtrace (FILE *file)
{
  void *array[100];
  size_t size;
  char **strings;
  size_t i;

  size = backtrace (array, 100);
  strings = backtrace_symbols (array, size);
  if (!strings) {
    /* BUG FIX: backtrace_symbols mallocs its result and returns NULL on
       failure; the original dereferenced it unconditionally. */
    fprintf(file, "backtrace_symbols failed\n");
    return;
  }

  for (i = 0; i < size; i++)
    fprintf(file, "%s\n", strings[i]);

  free (strings);
}
#endif
#endif
static void *allocate_medium(const size_t request_size_bytes, const int type)
{
@ -683,7 +778,11 @@ static void *allocate_medium(const size_t request_size_bytes, const int type)
{
NewGC *gc = GC_get_GC();
void * objptr = medium_page_realloc_dead_slot(gc, sz, pos, type);
void *objptr;
/* gc_if_needed_account_alloc_size(gc, sz); */
objptr = medium_page_realloc_dead_slot(gc, sz, pos, type);
if (!objptr) {
mpage *page;
objhead *info;
@ -696,6 +795,13 @@ static void *allocate_medium(const size_t request_size_bytes, const int type)
objptr = OBJHEAD_TO_OBJPTR(info);
}
#ifdef MASTER_ALLOC_DEBUG
if (postmaster_and_master_gc(gc)) {
fprintf(gcdebugOUT(), "MASTERGC_allocate_medium %zi %i %i %i %i %p\n", request_size_bytes, type, sz, pos, 1 << (pos +3), objptr);
/* print_libc_backtrace(gcdebugOUT()); */
}
#endif
ASSERT_VALID_OBJPTR(objptr);
return objptr;
}
@ -709,6 +815,7 @@ inline static mpage *gen0_create_new_nursery_mpage(NewGC *gc, const size_t page_
newmpage->size_class = 0;
newmpage->size = PREFIX_SIZE;
pagemap_add_with_size(gc->page_maps, newmpage, page_size);
GCVERBOSEPAGE("NEW gen0", newmpage);
return newmpage;
}
@ -794,6 +901,11 @@ inline static void *allocate(const size_t request_size, const int type)
while (OVERFLOWS_GEN0(newptr)) {
NewGC *gc = GC_get_GC();
#ifdef MZ_USE_PLACES
if (postmaster_and_master_gc(gc)) {
return allocate_medium(request_size, type);
}
#endif
/* bring page size used up to date */
gc->gen0.curr_alloc_page->size = GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr);
gc->gen0.current_size += gc->gen0.curr_alloc_page->size;
@ -852,7 +964,9 @@ inline static void *allocate(const size_t request_size, const int type)
info->type = type;
info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumbed us up to the next word boundary */
{
/* NewGC *gc = GC_get_GC(); */
void * objptr = OBJHEAD_TO_OBJPTR(info);
/* fprintf(gcdebugOUT(), "ALLOCATE page %p %zi %i %p\n", gc->gen0.curr_alloc_page->addr, request_size, type, objptr); */
ASSERT_VALID_OBJPTR(objptr);
return objptr;
}
@ -1699,6 +1813,10 @@ int GC_merely_accounting()
/* administration / initialization */
/*****************************************************************************/
/* Bytes of address space actually backing `page`: pages with
   size_class > 1 (big pages) span their size rounded up to whole
   allocation pages; all others occupy exactly one APAGE_SIZE page. */
static inline size_t real_page_size(mpage *page) {
  if (page->size_class > 1)
    return round_to_apage_size(page->size);
  return APAGE_SIZE;
}
static int designate_modified_gc(NewGC *gc, void *p)
{
mpage *page = pagemap_find_page(gc->page_maps, p);
@ -1711,7 +1829,7 @@ static int designate_modified_gc(NewGC *gc, void *p)
if(page) {
if (!page->back_pointers) {
page->mprotected = 0;
vm_protect_pages(page->addr, (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
vm_protect_pages(page->addr, real_page_size(page), 1);
page->back_pointers = 1;
}
/* For a single mutator thread, we shouldn't get here
@ -1754,11 +1872,76 @@ static void NewGCMasterInfo_cleanup() {
MASTERGCINFO = NULL;
}
/* Record in the master's table that this place finished its major GC. */
static void NewGCMasterInfo_set_have_collected() {
  int id = GC_objhead_template.owner;
  MASTERGCINFO->have_collected[id] = 1;
}
/* Runs in a place thread after it finishes a full collection: switches to
   the master GC, checks whether every registered place has collected, and
   only when all are ready performs the master's own major collection.
   Places that have not collected yet are woken via their signal fd. */
static void Master_collect() {
  NewGC *gc = GC_get_GC();
  if (premaster_or_master_gc(gc)) return; /* master shouldn't attempt to start itself */

  GC_switch_to_master_gc();
  GC_LOCK_DEBUG("MGCLOCK Master_collect\n");

  if ( MASTERGC->major_places_gc ) {
    int i = 0;
    int children_ready = 1;
    int maxid = MASTERGCINFO->next_GC_id;

    /* id 0 is the master itself; walk every place id */
    for (i=1; i < maxid; i++) {
      int have_collected = MASTERGCINFO->have_collected[i];
      if (have_collected == 1) {
        printf("%i READY\n", i);
      }
      else if ( have_collected == 0) {
        void *signal_fd = MASTERGCINFO->signal_fds[i];
        printf("%i NOT COLLECTED\n", i);
        children_ready = 0;
        MASTERGCINFO->have_collected[i] = -1;
        /* BUG FIX: was `signal_fd >= 0`, an ordered pointer/integer
           comparison (invalid C) that is also true for the (void *)-1
           "no fd registered" sentinel, signaling a bogus handle. */
        if (signal_fd != (void *)-1) {
          scheme_signal_received_at(signal_fd);
        }
      }
      else {
        /* already signaled on a previous pass, still waiting on it */
        printf("%i SIGNALED BUT NOT COLLECTED\n", i);
      }
    }

    if (children_ready) {
      for (i=0; i < maxid; i++) {
        MASTERGCINFO->have_collected[i] = 0;
      }
      printf("START MASTER COLLECTION\n");
      fprintf(gcdebugOUT(), "START MASTER COLLECTION\n");
      MASTERGC->major_places_gc = 0;
      garbage_collect(MASTERGC, 1);
      printf("END MASTER COLLECTION\n");
      fprintf(gcdebugOUT(), "END MASTER COLLECTION\n");
    }
  }

  GC_LOCK_DEBUG("UNMGCLOCK Master_collect\n");
  GC_switch_back_from_master(gc);
}
/* Allocate a fresh GC id for a newly created place, store it in the
   thread-local objhead template, and grow the master's per-place tables
   (have_collected, signal_fds) to cover it.  Serialized by the cangc
   write lock. */
static void NewGCMasterInfo_get_next_id(NewGC *newgc) {
  int newid;
  /* this could just be an atomic op if we had those */
  /* waiting for other threads to finish a possible concurrent GC is not optimal*/
  mzrt_rwlock_wrlock(MASTERGCINFO->cangc);
  /* BUG FIX: a leftover pre-refactor line incremented next_GC_id a second
     time, burning an id and leaving an unused hole in both tables. */
  newid = MASTERGCINFO->next_GC_id++;
  GC_objhead_template.owner = newid;
  /* printf("ALLOCATED GC OID %li\n", GC_objhead_template.owner); */
  /* NOTE(review): realloc results are unchecked; on OOM this crashes on
     the stores below rather than reporting — confirm project OOM policy. */
  MASTERGCINFO->have_collected = realloc(MASTERGCINFO->have_collected, sizeof(char) * MASTERGCINFO->next_GC_id);
  MASTERGCINFO->signal_fds = realloc(MASTERGCINFO->signal_fds, sizeof(void*) * MASTERGCINFO->next_GC_id);
  MASTERGCINFO->have_collected[newid] = 0;
  MASTERGCINFO->signal_fds[newid] = (void *)-1; /* sentinel: no wakeup fd registered yet */
  mzrt_rwlock_unlock(MASTERGCINFO->cangc);
}
/* Register this place's wakeup handle so Master_collect can wake it
   (via scheme_signal_received_at) when a major places GC is requested.
   Guarded by the cangc write lock. */
void GC_set_put_external_event_fd(void *fd) {
  mzrt_rwlock_wrlock(MASTERGCINFO->cangc);
  MASTERGCINFO->signal_fds[GC_objhead_template.owner] = fd;
  mzrt_rwlock_unlock(MASTERGCINFO->cangc);
}
#endif
@ -1882,8 +2065,16 @@ void GC_switch_out_master_gc() {
static int initialized = 0;
if(!initialized) {
NewGC *gc = GC_get_GC();
initialized = 1;
MASTERGC = GC_get_GC();
garbage_collect(gc, 1);
#ifdef MZ_USE_PLACES
GC_gen0_alloc_page_ptr = 2;
GC_gen0_alloc_page_end = 1;
#endif
MASTERGC = gc;
MASTERGC->dumping_avoid_collection = 1;
save_globals_to_gc(MASTERGC);
GC_construct_child_gc();
@ -1905,13 +2096,13 @@ void GC_switch_in_master_gc() {
void *GC_switch_to_master_gc() {
NewGC *gc = GC_get_GC();
/* return if MASTERGC hasn't been constructed yet, allow recursive locking */
if (!MASTERGC || gc == MASTERGC) {
return MASTERGC;
}
if (premaster_or_master_gc(gc)) { return MASTERGC; }
save_globals_to_gc(gc);
/*obtain exclusive access to MASTERGC*/
mzrt_rwlock_wrlock(MASTERGCINFO->cangc);
GC_LOCK_DEBUG("MGCLOCK GC_switch_to_master_gc\n");
GC_set_GC(MASTERGC);
restore_globals_from_gc(MASTERGC);
@ -1920,19 +2111,18 @@ void *GC_switch_to_master_gc() {
void GC_switch_back_from_master(void *gc) {
/* return if MASTERGC hasn't been constructed yet, allow recursive locking */
if (!MASTERGC || gc == MASTERGC) {
return;
}
if (premaster_or_master_gc(gc)) { return; }
save_globals_to_gc(MASTERGC);
/*release exclusive access to MASTERGC*/
GC_LOCK_DEBUG("UNMGCLOCK GC_switch_to_master_gc\n");
mzrt_rwlock_unlock(MASTERGCINFO->cangc);
GC_set_GC(gc);
restore_globals_from_gc(gc);
}
#endif
void GC_gcollect(void)
@ -1995,8 +2185,13 @@ void GC_mark(const void *const_p)
gc = GC_get_GC();
if(!(page = pagemap_find_page(gc->page_maps, p))) {
GCDEBUG((DEBUGOUTF,"Not marking %p (no page)\n",p));
return;
#ifdef MZ_USE_PLACES
if (!MASTERGC || !(page = pagemap_find_page(MASTERGC->page_maps, p)))
#endif
{
GCDEBUG((DEBUGOUTF,"Not marking %p (no page)\n",p));
return;
}
}
/* toss this over to the BTC mark routine if we're doing accounting */
@ -2191,6 +2386,11 @@ static inline void propagate_marks_worker(PageMap pagemap, Mark_Proc *mark_table
mpage *page;
p = REMOVE_BIG_PAGE_PTR_TAG(pp);
page = pagemap_find_page(pagemap, p);
#ifdef MZ_USE_PLACES
if (!page) {
page = pagemap_find_page(MASTERGC->page_maps, p);
}
#endif
start = PPTR(BIG_PAGE_TO_OBJECT(page));
alloc_type = page->page_type;
end = PAGE_END_VSS(page);
@ -2526,9 +2726,7 @@ static void reset_gen1_page(NewGC *gc, mpage *work)
{
if (gc->generations_available && work->mprotected) {
work->mprotected = 0;
add_protect_page_range(gc->protect_range, work->addr,
(work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE,
APAGE_SIZE, 1);
add_protect_page_range(gc->protect_range, work->addr, real_page_size(work), APAGE_SIZE, 1);
}
}
@ -2564,9 +2762,7 @@ static void remove_gen1_page_from_pagemap(NewGC *gc, mpage *work)
{
if (gc->generations_available && work->back_pointers && work->mprotected) {
work->mprotected = 0;
add_protect_page_range(gc->protect_range, work->addr,
(work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE,
APAGE_SIZE, 1);
add_protect_page_range(gc->protect_range, work->addr, real_page_size(work), APAGE_SIZE, 1);
}
pagemap_remove(gc->page_maps, work);
work->added = 0;
@ -2685,6 +2881,7 @@ mpage *allocate_compact_target(NewGC *gc, mpage *work)
npage->page_type = work->page_type;
npage->marked_on = 1;
backtrace_new_page(gc, npage);
GCVERBOSEPAGE("NEW COMPACT PAGE", npage);
/* Link in this new replacement page */
npage->prev = work;
npage->next = work->next;
@ -2790,22 +2987,84 @@ inline static void do_heap_compact(NewGC *gc)
}
}
#ifdef KILLING_DEBUG
#include <ctype.h>
/* Hex-dump `l` bytes of `buf` to `file` as two lines: the first is two
   hex digits per byte, the second shows each printable byte as "<c> "
   (aligned under its hex pair) and a single space otherwise. */
static void fprintf_buffer(FILE* file, char* buf, int l) {
  int idx;

  /* line 1: two-digit hex per byte */
  for (idx = 0; idx < l; idx++) {
    fprintf(file, "%02hhx", buf[idx]);
  }
  fprintf(file, "\n");

  /* line 2: printable rendering */
  for (idx = 0; idx < l; idx++) {
    unsigned char ch = buf[idx];
    if (!isprint(ch)) {
      fprintf(file, " ");
    }
    else {
      fprintf(file, "%c ", ch);
    }
  }
  fprintf(file, "\n");
}
/* Dump an object's header fields and a best-effort rendering of its
   payload to `file`, prefixed with `msg`.  When `isgc` is non-zero the
   output is restricted to the master GC (postmaster_and_master_gc). */
static void fprintf_debug(NewGC *gc, const char *msg, objhead *info, FILE* file, int isgc) {
  if (!isgc || postmaster_and_master_gc(gc)) {
    Scheme_Object *obj = OBJHEAD_TO_OBJPTR(info);
    fprintf(file, "%s %p ot %i it %i im %i is %i is >> 3 %i\n", msg, obj, obj->type, info->type, info->mark, info->size, info->size >> 3);
    switch (obj->type) {
      case scheme_unix_path_type:
        /* only dereference the path bytes if they are still mapped */
        if (pagemap_find_page(gc->page_maps, SCHEME_PATH_VAL(obj))) {
          fprintf_buffer(file, SCHEME_PATH_VAL(obj), SCHEME_PATH_LEN(obj));
        }
        else {
          fprintf(file, "%p already freed and out of bounds\n", SCHEME_PATH_VAL(obj));
        }
        break;
      case scheme_symbol_type:
        fprintf_buffer(file, SCHEME_SYM_VAL(obj), SCHEME_SYM_LEN(obj));
        break;
      case scheme_resolved_module_path_type:
        /* recurse to print the wrapped path object when still mapped */
        if (pagemap_find_page(gc->page_maps, SCHEME_PTR_VAL(obj))) {
          fprintf_debug(gc, "RMP ", OBJPTR_TO_OBJHEAD(SCHEME_PTR_VAL(obj)), file, isgc);
        }
        else {
          fprintf(file, "RMP %p already freed and out of bounds\n", SCHEME_PATH_VAL(obj));
        }
        /* NOTE(review): no break here — control falls through to the raw
           dump below.  Plausibly intentional for debugging, but confirm;
           if so, mark with an explicit fallthrough comment upstream. */
      default:
        fprintf_buffer(file, ((char *)obj), (info->size * WORD_SIZE));
        break;
    }
  }
}
/* KILLING_DEBUG hook: log an object being freed, master-GC output only
   (isgc = 1). */
static void killing_debug(NewGC *gc, void *info) {
  fprintf_debug(gc, "killing", (objhead *) info, gcdebugOUT(), 1);
}
/* KILLING_DEBUG hook: log a resolved-module-path object as it is marked;
   not restricted to the master GC (isgc = 0). */
static void marking_rmp_debug(NewGC *gc, void *info) {
  fprintf_debug(gc, "marking rmp", (objhead *) info, gcdebugOUT(), 0);
}
#endif
static void repair_heap(NewGC *gc)
{
mpage *page;
int i;
Fixup_Proc *fixup_table = gc->fixup_table;
#ifdef MZ_USE_PLACES
int master_has_switched = postmaster_and_master_gc(gc);
#endif
for(i = 0; i < PAGE_TYPES; i++) {
for(page = gc->gen1_pages[i]; page; page = page->next) {
if(page->marked_on) {
#ifdef MZ_USE_PLACES
if (master_has_switched || page->marked_on) {
#else
if (page->marked_on) {
#endif
page->has_new = 0;
/* these are guaranteed not to be protected */
if(page->size_class) {
/* since we get here via gen1_pages, it's a big page */
void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
void **end = PAGE_END_VSS(page);
#ifdef MZ_USE_PLACES
objhead *info = BIG_PAGE_TO_OBJHEAD(page);
if (page->marked_on || info->mark) {
page->marked_on = 1;
#endif
GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
page, start));
page->size_class = 2; /* remove the mark */
@ -2828,6 +3087,9 @@ static void repair_heap(NewGC *gc)
break;
}
}
#ifdef MZ_USE_PLACES
}
#endif
} else {
void **start = PPTR(NUM(page->addr) + page->previous_size);
void **end = PAGE_END_VSS(page);
@ -2912,7 +3174,11 @@ static void repair_heap(NewGC *gc)
for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
for (page = gc->med_pages[i]; page; page = page->next) {
#ifdef MZ_USE_PLACES
if (master_has_switched || page->marked_on) {
#else
if (page->marked_on) {
#endif
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
@ -2936,9 +3202,17 @@ static void repair_heap(NewGC *gc)
start += info->size;
}
break;
case PAGE_ATOMIC:
start += info->size;
}
info->mark = 0;
#ifdef MZ_USE_PLACES
page->marked_on = 1;
#endif
} else {
#ifdef KILLING_DEBUG
killing_debug(gc, info);
#endif
info->dead = 1;
start += info->size;
}
@ -2949,10 +3223,9 @@ static void repair_heap(NewGC *gc)
}
/* Release a generation-1 page: drop it from the pagemap, free its
   backtrace shadow, return its memory (real_page_size bytes), then free
   the page descriptor itself. */
static inline void gen1_free_mpage(PageMap pagemap, mpage *page) {
  pagemap_remove(pagemap, page);
  free_backtrace(page);
  /* BUG FIX: leftover pre-refactor lines declared a local named
     real_page_size (shadowing the helper function, which makes the call
     below ill-formed) and freed the page's memory a second time. */
  free_pages(GC, page->addr, real_page_size(page));
  free_mpage(page);
}
@ -2963,6 +3236,7 @@ static inline void cleanup_vacated_pages(NewGC *gc) {
/* Free pages vacated by compaction: */
while (pages) {
mpage *next = pages->next;
GCVERBOSEPAGE("Cleaning up vacated", pages);
gen1_free_mpage(pagemap, pages);
pages = next;
}
@ -2980,6 +3254,9 @@ inline static void gen0_free_big_pages(NewGC *gc) {
free_pages(gc, work->addr, round_to_apage_size(work->size));
free_mpage(work);
}
/* They are all gone, set the pointer to NULL */
gc->gen0.big_pages = NULL;
}
static void clean_up_heap(NewGC *gc)
@ -3000,6 +3277,7 @@ static void clean_up_heap(NewGC *gc)
/* remove work from list */
if(prev) prev->next = next; else gc->gen1_pages[i] = next;
if(next) work->next->prev = prev;
GCVERBOSEPAGE("Cleaning up BIGPAGE", work);
gen1_free_mpage(pagemap, work);
} else {
pagemap_add(pagemap, work);
@ -3050,6 +3328,7 @@ static void clean_up_heap(NewGC *gc)
/* free the page */
if(prev) prev->next = next; else gc->med_pages[i] = next;
if(next) work->next->prev = prev;
GCVERBOSEPAGE("Cleaning up MED PAGE NO OBJ", work);
gen1_free_mpage(pagemap, work);
}
} else if (gc->gc_full || !work->generation) {
@ -3058,6 +3337,7 @@ static void clean_up_heap(NewGC *gc)
next = work->next;
if(prev) prev->next = next; else gc->med_pages[i] = next;
if(next) work->next->prev = prev;
GCVERBOSEPAGE("Cleaning up MED NO MARKEDON", work);
gen1_free_mpage(pagemap, work);
} else {
/* not touched during minor gc */
@ -3076,6 +3356,34 @@ static void clean_up_heap(NewGC *gc)
cleanup_vacated_pages(gc);
}
/* Make all old-generation (gen1 and medium) non-atomic pages writable
   again, batching the mprotect work through the page-range accumulator.
   Used by the master GC path in garbage_collect in place of
   protect_old_pages. */
static void unprotect_old_pages(NewGC *gc)
{
  Page_Range *protect_range = gc->protect_range;
  mpage *page;
  int i;

  for(i = 0; i < PAGE_TYPES; i++) {
    if(i != PAGE_ATOMIC)
      for(page = gc->gen1_pages[i]; page; page = page->next)
        if(page->page_type != PAGE_ATOMIC) {
          if (page->mprotected) {
            page->mprotected = 0;
            /* NOTE(review): uses page->size here, while reset_gen1_page
               and friends use real_page_size(page) for size_class > 1 —
               confirm big pages cannot reach this path mprotected. */
            add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 1);
          }
        }
  }

  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    for (page = gc->med_pages[i]; page; page = page->next) {
      if (page->mprotected) {
        page->mprotected = 0;
        add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 1);
      }
    }
  }

  /* issue the accumulated unprotect calls */
  flush_protect_page_ranges(protect_range, 0);
}
static void protect_old_pages(NewGC *gc)
{
Page_Range *protect_range = gc->protect_range;
@ -3126,9 +3434,21 @@ extern double scheme_get_inexact_milliseconds(void);
static void garbage_collect(NewGC *gc, int force_full)
{
unsigned long old_mem_use = gc->memory_in_use;
unsigned long old_gen0 = gc->gen0.current_size;
unsigned long old_mem_use;
unsigned long old_gen0;
int next_gc_full;
#ifdef MZ_USE_PLACES
if (postmaster_and_place_gc(gc)) {
mzrt_rwlock_rdlock(MASTERGCINFO->cangc);
/* printf("RD MGCLOCK garbage_collect %i\n", GC_objhead_template.owner); */
}
#endif
old_mem_use = gc->memory_in_use;
old_gen0 = gc->gen0.current_size;
TIME_DECLS();
/* determine if this should be a full collection or not */
@ -3190,7 +3510,7 @@ static void garbage_collect(NewGC *gc, int force_full)
mark_immobiles(gc);
TIME_STEP("rooted");
#ifdef MZ_USE_PLACES
if (!is_master_gc(gc))
if (premaster_or_place_gc(gc))
#endif
GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
@ -3234,8 +3554,11 @@ static void garbage_collect(NewGC *gc, int force_full)
TIME_STEP("zeroed");
if(gc->gc_full) do_heap_compact(gc);
if(gc->gc_full)
#ifdef MZ_USE_PLACES
if (!MASTERGC)
#endif
do_heap_compact(gc);
TIME_STEP("compacted");
/* do some cleanup structures that either change state based on the
@ -3250,7 +3573,7 @@ static void garbage_collect(NewGC *gc, int force_full)
repair_roots(gc);
repair_immobiles(gc);
#ifdef MZ_USE_PLACES
if (!is_master_gc(gc))
if (premaster_or_place_gc(gc))
#endif
GC_fixup_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
TIME_STEP("reparied roots");
@ -3258,15 +3581,28 @@ static void garbage_collect(NewGC *gc, int force_full)
TIME_STEP("repaired");
clean_up_heap(gc);
TIME_STEP("cleaned heap");
reset_nursery(gc);
#ifdef MZ_USE_PLACES
if (premaster_or_place_gc(gc))
#endif
reset_nursery(gc);
TIME_STEP("reset nursurey");
#ifdef NEWGC_BTC_ACCOUNT
if (gc->gc_full)
BTC_do_accounting(gc);
#endif
TIME_STEP("accounted");
if (gc->generations_available)
if (gc->generations_available) {
#ifdef MZ_USE_PLACES
if (postmaster_and_master_gc(gc)) {
unprotect_old_pages(gc);
}
else {
protect_old_pages(gc);
}
#else
protect_old_pages(gc);
#endif
}
TIME_STEP("protect");
if (gc->gc_full)
vm_flush_freed_pages(gc->vm);
@ -3353,6 +3689,19 @@ static void garbage_collect(NewGC *gc, int force_full)
if (next_gc_full)
gc->full_needed_for_finalization = 1;
#ifdef MZ_USE_PLACES
if (postmaster_and_place_gc(gc)) {
if (gc->gc_full) {
NewGCMasterInfo_set_have_collected();
}
/* printf("UN RD MGCLOCK garbage_collect %i\n", GC_objhead_template.owner); */
mzrt_rwlock_unlock(MASTERGCINFO->cangc);
if (gc->gc_full) {
Master_collect();
}
}
#endif
}
#if MZ_GC_BACKTRACE
@ -3412,7 +3761,8 @@ void GC_free_all(void)
next = work->next;
if (work->mprotected)
vm_protect_pages(work->addr, (work->size_class > 1) ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
vm_protect_pages(work->addr, real_page_size(work), 1);
GCVERBOSEPAGE("Cleaning up GC DYING", work);
gen1_free_mpage(pagemap, work);
}
}

View File

@ -88,6 +88,8 @@ typedef struct Page_Range {
#ifdef MZ_USE_PLACES
/* Shared bookkeeping for the master GC across all places; guarded by
   the cangc lock. */
typedef struct NewGCMasterInfo {
  unsigned short next_GC_id;      /* next place id to hand out; also the length of the two tables below */
  unsigned char *have_collected;  /* per-id flag: 0 = not collected, 1 = done,
                                     -1 (stored as 255) = signaled but not yet done */
  void **signal_fds;              /* per-id wakeup handles passed to scheme_signal_received_at */
  mzrt_rwlock *cangc;             /* serializes id allocation, fd registration, and master access */
} NewGCMasterInfo;
#endif
@ -173,6 +175,7 @@ typedef struct NewGC {
/* Distributed GC over places info */
#ifdef MZ_USE_PLACES
objhead saved_GC_objhead_template;
int major_places_gc; /* :1; */
#endif
struct mpage *thread_local_pages;

View File

@ -41,6 +41,8 @@ static void launchgdb() {
void fault_handler(int sn, struct siginfo *si, void *ctx)
{
void *p = si->si_addr;
int c = si->si_code;
int m = 0;
if (si->si_code != SEGV_ACCERR) { /*SEGV_MAPERR*/
printf("SIGSEGV fault on %p\n", p);
#if WAIT_FOR_GDB
@ -51,6 +53,12 @@ void fault_handler(int sn, struct siginfo *si, void *ctx)
if (!designate_modified(p)) {
if (si->si_code == SEGV_ACCERR) {
#ifdef MZ_USE_PLACES
if(pagemap_find_page(MASTERGC->page_maps, p)) {
m = 1;
printf("OWNED BY MASTER %p\n", p);
}
#endif
printf("mprotect fault on %p\n", p);
}
else {

View File

@ -365,6 +365,14 @@ Scheme_Env *scheme_engine_instance_init() {
scheme_init_parameterization_readonly_globals();
env = place_instance_init_post_kernel(1);
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
{
int signal_fd;
signal_fd = scheme_get_signal_handle();
GC_set_put_external_event_fd(signal_fd);
}
#endif
return env;
}
@ -498,8 +506,19 @@ static Scheme_Env *place_instance_init_post_kernel(int initial_main_os_thread) {
}
/* Initialize the Scheme runtime for a newly spawned place: construct the
   place's child GC, run pre/post kernel initialization, register the
   place's signal handle with the GC so the master can wake it, and enable
   breaks.  Returns the place's namespace environment. */
Scheme_Env *scheme_place_instance_init(void *stack_base) {
  Scheme_Env *env;
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  int signal_fd;
  GC_construct_child_gc();
#endif
  place_instance_init_pre_kernel(stack_base);
  /* BUG FIX: a leftover pre-refactor `return place_instance_init_post_kernel(0);`
     here made everything below unreachable (no fd registration, no
     scheme_set_can_break). */
  env = place_instance_init_post_kernel(0);
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  /* NOTE(review): signal_fd is an int but GC_set_put_external_event_fd
     takes void* — confirm scheme_get_signal_handle's actual handle type. */
  signal_fd = scheme_get_signal_handle();
  GC_set_put_external_event_fd(signal_fd);
#endif
  scheme_set_can_break(1);
  return env;
}
void scheme_place_instance_destroy() {

View File

@ -35,6 +35,12 @@ START_XFORM_SUSPEND;
# endif
#endif
#ifndef MZ_PRECISE_GC
int GC_pthread_join(pthread_t thread, void **retval);
int GC_pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void*), void * arg);
int GC_pthread_detach(pthread_t thread);
#endif
void mzrt_set_user_break_handler(void (*user_break_handler)(int))
{
#ifdef WIN32
@ -227,6 +233,21 @@ void * mz_proc_thread_wait(mz_proc_thread *thread) {
#endif
}
/* Detach `thread` so its resources are reclaimed automatically when it
   exits.  Returns 0 on success, a non-zero error code otherwise. */
int mz_proc_thread_detach(mz_proc_thread *thread) {
#ifdef WIN32
  /* BUG FIX: the original declared an uninitialized DWORD and returned it
     cast to void* from an int-returning function (UB + type error —
     clearly copy-pasted from mz_proc_thread_wait).  Report success here;
     TODO(review): implement real detach (CloseHandle) for the WIN32
     thread-creation path. */
  return 0;
#else
  int rc;
# ifndef MZ_PRECISE_GC
  rc = GC_pthread_detach(thread->threadid);
# else
  rc = pthread_detach(thread->threadid);
# endif
  return rc;
#endif
}
/***********************************************************************/
/* RW Lock */
/***********************************************************************/

View File

@ -42,6 +42,7 @@ typedef void *(mz_proc_thread_start)(void*);
mz_proc_thread* mzrt_proc_first_thread_init();
mz_proc_thread* mz_proc_thread_create(mz_proc_thread_start*, void* data);
void *mz_proc_thread_wait(mz_proc_thread *thread);
int mz_proc_thread_detach(mz_proc_thread *thread);
void mzrt_sleep(int seconds);

View File

@ -132,14 +132,70 @@ Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
return (Scheme_Object*) place;
}
#ifdef MZ_PRECISE_GC
/* State shared between scheme_place_wait and the detached worker thread
   that blocks on the place's OS thread. */
typedef struct {
  mz_proc_thread *proc_thread;   /* the place's OS thread being waited on */
  Scheme_Place *waiting_place;   /* NOTE(review): assigned but never read in this file — confirm intent */
  int wake_fd;                   /* signal handle used to wake the blocked place */
  int ready;                     /* set to 1 by the worker once proc_thread exits */
  long rc;                       /* the exited thread's result value */
} proc_thread_wait_data;
/* Body of the detached worker thread: blocks until the place's OS thread
   exits, records its result and sets ready, then wakes the waiting place
   so place_wait_ready observes the change. */
static void *mz_proc_thread_wait_worker(void *data) {
  void *rc;
  proc_thread_wait_data *wd = (proc_thread_wait_data*) data;

  rc = mz_proc_thread_wait(wd->proc_thread);
  wd->rc = (long) rc;
  wd->ready = 1;
  /* NOTE(review): passes &wd->wake_fd (an int*), while Master_collect
     passes the stored handle value directly to scheme_signal_received_at —
     confirm which form that API expects. */
  scheme_signal_received_at(&wd->wake_fd);
  return NULL;
}
/* scheme_block_until predicate: non-zero once the detached worker thread
   has flagged the waited-on place thread as exited. */
static int place_wait_ready(Scheme_Object *o) {
  proc_thread_wait_data *wd = (proc_thread_wait_data *) o;
  return wd->ready ? 1 : 0;
}
#endif
/* (place-wait place): block the current place until `place`'s OS thread
   exits, returning its result as a fixnum.  Under precise GC the actual
   wait happens on a detached helper thread so this place's scheduler can
   keep running; otherwise we block directly. */
static Scheme_Object *scheme_place_wait(int argc, Scheme_Object *args[]) {
  Scheme_Place *place;
  place = (Scheme_Place *) args[0];

#ifdef MZ_PRECISE_GC
  {
    Scheme_Object *rc;
    mz_proc_thread *worker_thread;
    int wake_fd;
    proc_thread_wait_data *wd;

    /* BUG FIX: removed leftover pre-refactor lines — an unused outer
       `void *rc;` and a mid-function `rc = mz_proc_thread_wait(...);
       return scheme_void;` that returned before any of the code below
       could run. */
    wd = (proc_thread_wait_data*) malloc(sizeof(proc_thread_wait_data));
    wd->proc_thread = (mz_proc_thread *)place->proc_thread;
    /* BUG FIX: was copied from an uninitialized local `waiting_place`;
       the field is not read anywhere visible, so NULL it defensively.
       TODO(review): confirm what this field was meant to hold. */
    wd->waiting_place = NULL;
    wake_fd = scheme_get_signal_handle();
    wd->wake_fd = wake_fd;
    wd->ready = 0;

    worker_thread = mz_proc_thread_create(mz_proc_thread_wait_worker, wd);
    mz_proc_thread_detach(worker_thread);
    scheme_block_until(place_wait_ready, NULL, (Scheme_Object *) wd, 1.0);

    rc = scheme_make_integer((long)wd->rc);
    free(wd);
    return rc;
  }
#else
  {
    void *rcvoid;
    rcvoid = mz_proc_thread_wait((mz_proc_thread *)place->proc_thread);
    return scheme_make_integer((long) rcvoid);
  }
#endif
}
static Scheme_Object *scheme_place_p(int argc, Scheme_Object *args[])
@ -181,6 +237,9 @@ Scheme_Object *scheme_places_deep_copy(Scheme_Object *so)
case scheme_char_string_type: /*43*/
new_so = scheme_make_sized_offset_char_string(SCHEME_CHAR_STR_VAL(so), 0, SCHEME_CHAR_STRLEN_VAL(so), 1);
break;
case scheme_byte_string_type:
new_so = scheme_make_sized_offset_byte_string(SCHEME_BYTE_STR_VAL(so), 0, SCHEME_BYTE_STRLEN_VAL(so), 1);
break;
case scheme_unix_path_type:
new_so = scheme_make_sized_offset_path(SCHEME_BYTE_STR_VAL(so), 0, SCHEME_BYTE_STRLEN_VAL(so), 1);
break;
@ -232,10 +291,8 @@ static void *place_start_proc(void *data_arg) {
null_out_runtime_globals();
/* scheme_make_thread behaves differently if the above global vars are not null */
#ifdef MZ_PRECISE_GC
GC_construct_child_gc();
#endif
scheme_place_instance_init(stack_base);
a[0] = place_data->current_library_collection_paths;
scheme_current_library_collection_paths(1, a);
@ -337,8 +394,8 @@ void spawn_master_scheme_place() {
mzrt_proc_first_thread_init();
//scheme_master_proc_thread = mz_proc_thread_create(master_scheme_place, NULL);
scheme_master_proc_thread = ~0;
/* scheme_master_proc_thread = mz_proc_thread_create(master_scheme_place, NULL); */
scheme_master_proc_thread = (void*) ~0;
}
#endif

View File

@ -4142,6 +4142,10 @@ void scheme_thread_block(float sleep_time)
}
#endif
/*####################################*/
/* THREAD CONTEXT SWITCH HAPPENS HERE */
/*####################################*/
if (next) {
/* Swap in `next', but first clear references to other threads. */
swap_target = next;
@ -4188,6 +4192,11 @@ void scheme_thread_block(float sleep_time)
if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
raise_break(p);
}
/* Check for major GC request from master GC */
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
GC_check_master_gc_request();
#endif
if (sleep_end > 0) {
if (sleep_end > scheme_get_inexact_milliseconds()) {