GC clean-ups related to disabled collections and place-shared space

The changes make shared-space allocation work in GC-stress mode
(forcing "master" GCs, just like other GCs).
Matthew Flatt 2013-07-16 10:22:18 -06:00
parent 05ce59c54e
commit e59f888310
4 changed files with 51 additions and 56 deletions
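The heart of the change is a new collect_now() helper that funnels every explicit collection request through one place, so the place-shared ("master") GC is forced to collect the same way any other GC is; condensed from the diff below:

    /* Condensed from the changes below: explicit collections go through
       collect_now(), which hands the request to the master GC when this
       GC is the place-shared one, and collects directly otherwise. */
    static void collect_now(NewGC *gc, int major)
    {
    #ifdef MZ_USE_PLACES
      if (postmaster_and_master_gc(gc))
        master_collect_initiate(gc);   /* force a "master" GC, like any other GC */
      else
    #endif
        garbage_collect(gc, major, 0, NULL);
    }

Callers check the renamed gc->avoid_collection counter (previously dumping_avoid_collection) before calling it, so collection stays disabled wherever it was disabled before.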

View File

@@ -94,7 +94,7 @@ static void print_traced_objects(int path_length_limit,
   memset(ditto, 0, sizeof(void*) * DITTO_BUFFER_SIZE);
-  GC_instance->dumping_avoid_collection++;
+  GC_instance->avoid_collection++;
   GCPRINT(GCOUTF, "Begin Trace\n");
   for (i = 0; i < found_object_count; i++) {
     void *p;
@@ -130,5 +130,5 @@ static void print_traced_objects(int path_length_limit,
       dp = (j % DITTO_BUFFER_SIZE);
   }
   GCPRINT(GCOUTF, "End Trace\n");
-  --GC_instance->dumping_avoid_collection;
+  --GC_instance->avoid_collection;
 }

View File

@@ -247,7 +247,7 @@ inline static uintptr_t custodian_usage(NewGC*gc, void *custodian)
   int i;

   if(!gc->really_doing_accounting) {
-    if (!gc->dumping_avoid_collection) {
+    if (!gc->avoid_collection) {
       CHECK_PARK_UNUSED(gc);
       gc->park[0] = custodian;
       gc->really_doing_accounting = 1;
@@ -502,7 +502,7 @@ inline static void BTC_add_account_hook(int type,void *c1,void *c2,uintptr_t b)
   AccountHook *work;

   if(!gc->really_doing_accounting) {
-    if (!gc->dumping_avoid_collection) {
+    if (!gc->avoid_collection) {
       CHECK_PARK_UNUSED(gc);
       gc->park[0] = c1;
       gc->park[1] = c2;

View File

@@ -309,6 +309,7 @@ void GC_set_post_propagate_hook(GC_Post_Propagate_Hook_Proc func) {
 /* OS-Level Memory Management Routines                                       */
 /*****************************************************************************/

 static void garbage_collect(NewGC*, int, int, Log_Master_Info*);
+static void collect_now(NewGC*, int);

 static void out_of_memory()
 {
@@ -352,20 +353,18 @@ inline static void check_used_against_max(NewGC *gc, size_t len)
   page_count = size_to_apage_count(len);
   gc->used_pages += page_count;

-  if (gc->dumping_avoid_collection) return;
-
-  if(gc->in_unsafe_allocation_mode) {
-    if(gc->used_pages > gc->max_pages_in_heap)
+  if (gc->in_unsafe_allocation_mode) {
+    if (gc->used_pages > gc->max_pages_in_heap)
       gc->unsafe_allocation_abort(gc);
-  } else {
-    if(gc->used_pages > gc->max_pages_for_use) {
-      garbage_collect(gc, 0, 0, NULL); /* hopefully this will free enough space */
-      if(gc->used_pages > gc->max_pages_for_use) {
-        garbage_collect(gc, 1, 0, NULL); /* hopefully *this* will free enough space */
-        if(gc->used_pages > gc->max_pages_for_use) {
+  } else if (!gc->avoid_collection) {
+    if (gc->used_pages > gc->max_pages_for_use) {
+      collect_now(gc, 0); /* hopefully this will free enough space */
+      if (gc->used_pages > gc->max_pages_for_use) {
+        collect_now(gc, 1); /* hopefully *this* will free enough space */
+        if (gc->used_pages > gc->max_pages_for_use) {
           /* too much memory allocated.
            * Inform the thunk and then die semi-gracefully */
-          if(GC_out_of_memory) {
+          if (GC_out_of_memory) {
             gc->used_pages -= page_count;
             GC_out_of_memory();
           }
@@ -914,17 +913,22 @@ static int check_master_wants_to_collect() {
 }
 #endif

-static inline void gc_if_needed_account_alloc_size(NewGC *gc, size_t allocate_size) {
-  if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
+static void collect_now(NewGC *gc, int major)
+{
 #ifdef MZ_USE_PLACES
-    if (postmaster_and_master_gc(gc)) {
+  if (postmaster_and_master_gc(gc))
     master_collect_initiate(gc);
-    } else
+  else
 #endif
-    {
-      if (!gc->dumping_avoid_collection)
-        garbage_collect(gc, 0, 0, NULL);
-    }
+    garbage_collect(gc, major, 0, NULL);
+}
+
+static inline void gc_if_needed_account_alloc_size(NewGC *gc, size_t allocate_size)
+{
+  if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
+    if (!gc->avoid_collection)
+      collect_now(gc, 0);
   }
   gc->gen0.current_size += allocate_size;
 }
@@ -1171,8 +1175,8 @@ uintptr_t GC_make_jit_nursery_page(int count, uintptr_t *sz) {
   intptr_t size = count * THREAD_LOCAL_PAGE_SIZE;

   if((gc->gen0.current_size + size) >= gc->gen0.max_size) {
-    if (!gc->dumping_avoid_collection)
-      garbage_collect(gc, 0, 0, NULL);
+    if (!gc->avoid_collection)
+      collect_now(gc, 0);
   }
   gc->gen0.current_size += size;
@@ -1213,7 +1217,7 @@ inline static void gen0_free_mpage(NewGC *gc, mpage *page) {
 #define OVERFLOWS_GEN0(ptr) ((ptr) > GC_gen0_alloc_page_end)

 #ifdef MZ_GC_STRESS_TESTING
-# define GC_TRIGGER_COUNT 100
+# define GC_TRIGGER_COUNT 11
 static int stress_counter = 0;
 int scheme_gc_slow_path_started = 1;
 static int TAKE_SLOW_PATH()
@@ -1272,17 +1276,14 @@ inline static uintptr_t allocate_slowpath(NewGC *gc, size_t allocate_size, uintp
     ASSERT_VALID_INFOPTR(GC_gen0_alloc_page_ptr);
     GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_ALLOC_SIZE(gc->gen0.curr_alloc_page);
   }
-  /* WARNING: tries to avoid a collection, but
-     gen0_create_new_mpage() can cause a collection via
-     malloc_pages(), due to check_used_against_max() */
-  else if (gc->dumping_avoid_collection) {
+  else if (gc->avoid_collection)
     gen0_allocate_and_setup_new_page(gc);
-  } else {
+  else {
 #ifdef INSTRUMENT_PRIMITIVES
     LOG_PRIM_START(((void*)garbage_collect));
 #endif
-    garbage_collect(gc, 0, 0, NULL);
+    collect_now(gc, 0);
 #ifdef INSTRUMENT_PRIMITIVES
     LOG_PRIM_END(((void*)garbage_collect));
@@ -1319,9 +1320,9 @@ inline static void *allocate(const size_t request_size, const int type)
 #ifdef MZ_GC_STRESS_TESTING
   if (TAKE_SLOW_PATH()) {
     NewGC *gc = GC_get_GC();
-    if (!gc->dumping_avoid_collection) {
+    if (!gc->avoid_collection) {
       stress_counter = 0;
-      garbage_collect(gc, 0, 0, NULL);
+      collect_now(gc, 0);
     }
   }
 #endif
@@ -1582,7 +1583,7 @@ void GC_create_message_allocator() {
   GC_gen0_alloc_page_end = 0;

   gc->in_unsafe_allocation_mode = 1;
-  gc->dumping_avoid_collection++;
+  gc->avoid_collection++;
 }

 void GC_report_unsent_message_delta(intptr_t amt)
@@ -1623,7 +1624,7 @@ void *GC_finish_message_allocator() {
   gc->saved_allocator = NULL;

   gc->in_unsafe_allocation_mode = 0;
-  gc->dumping_avoid_collection--;
+  gc->avoid_collection--;

   return (void *) msgm;
 }
@@ -2773,7 +2774,7 @@ static void NewGC_initialize(NewGC *newgc, NewGC *inheritgc, NewGC *parentgc) {
   if (inheritgc) {
     newgc->mark_table = inheritgc->mark_table;
     newgc->fixup_table = inheritgc->fixup_table;
-    newgc->dumping_avoid_collection = inheritgc->dumping_avoid_collection - 1;
+    newgc->avoid_collection = 0;
 #ifdef MZ_USE_PLACES
     newgc->parent_gc = parentgc;
 #endif
@@ -2895,8 +2896,8 @@ void GC_construct_child_gc(struct NewGC *parent_gc, intptr_t limit) {
 void GC_destruct_child_gc() {
   NewGC *gc = GC_get_GC();
   int waiting = 0;
-  do {
+  do {
     mzrt_rwlock_wrlock(MASTERGCINFO->cangc);
     GC_LOCK_DEBUG("MGCLOCK GC_destruct_child_gc\n");
     waiting = MASTERGC->major_places_gc;
@@ -2938,7 +2939,7 @@ void GC_switch_out_master_gc() {
     initialized = 1;

-    if (!gc->dumping_avoid_collection)
+    if (!gc->avoid_collection)
       garbage_collect(gc, 1, 1, NULL);

 #ifdef MZ_USE_PLACES
@@ -2948,7 +2949,6 @@ void GC_switch_out_master_gc() {
 #endif

     MASTERGC = gc;
-    MASTERGC->dumping_avoid_collection++;

     save_globals_to_gc(MASTERGC);
     GC_construct_child_gc(NULL, 0);
@@ -3001,27 +3001,22 @@ void GC_gcollect(void)
 {
   NewGC *gc = GC_get_GC();

-  if (gc->dumping_avoid_collection) return;
+  if (gc->avoid_collection) return;

-#ifdef MZ_USE_PLACES
-  if (postmaster_and_master_gc(gc))
-    master_collect_initiate(gc);
-  else
-#endif
-    garbage_collect(gc, 1, 0, NULL);
+  collect_now(gc, 1);
 }

 void GC_gcollect_minor(void)
 {
   NewGC *gc = GC_get_GC();

-  if (gc->dumping_avoid_collection) return;
+  if (gc->avoid_collection) return;

 #ifdef MZ_USE_PLACES
   if (postmaster_and_master_gc(gc)) return;
 #endif
-  garbage_collect(gc, 0, 0, NULL);
+  collect_now(gc, 0);
 }

 void GC_enable_collection(int on)
@@ -3029,9 +3024,9 @@ void GC_enable_collection(int on)
   NewGC *gc = GC_get_GC();

   if (on)
-    --gc->dumping_avoid_collection;
+    --gc->avoid_collection;
   else
-    gc->dumping_avoid_collection++;
+    gc->avoid_collection++;
 }

 void GC_register_traversers2(short tag, Size2_Proc size, Mark2_Proc mark,
@@ -3576,7 +3571,7 @@ void GC_dump_with_traces(int flags,
   reset_object_traces();

   if (for_each_found)
-    gc->dumping_avoid_collection++;
+    gc->avoid_collection++;

   /* Traverse tagged pages to count objects: */
   for (i = 0; i < MAX_DUMP_TAG; i++) {
@@ -3740,7 +3735,7 @@ void GC_dump_with_traces(int flags,
   }

   if (for_each_found)
-    --gc->dumping_avoid_collection;
+    --gc->avoid_collection;
 }

 void GC_dump(void)
@@ -4575,7 +4570,7 @@ static void park_for_inform_callback(NewGC *gc)
 {
   /* Avoid nested collections, which would need
      nested parking spaces: */
-  gc->dumping_avoid_collection++;
+  gc->avoid_collection++;

   /* Inform might allocate, which might need park: */
   gc->park_isave[0] = gc->park[0];
@@ -4591,7 +4586,7 @@ static void unpark_for_inform_callback(NewGC *gc)
   gc->park_isave[0] = NULL;
   gc->park_isave[1] = NULL;

-  --gc->dumping_avoid_collection;
+  --gc->avoid_collection;
 }

 #if 0

View File

@@ -165,7 +165,7 @@ typedef struct NewGC {
   mpage *release_pages;
   uintptr_t stack_base;
-  int dumping_avoid_collection; /* dumping coutner flag */
+  int avoid_collection;
   unsigned char generations_available :1;
   unsigned char in_unsafe_allocation_mode :1;