make memory accounting and places work together

The `current-memory-use' function's result now includes the memory
use of places created from the calling place, and custodian memory
limits now also cover the memory use of places owned by the custodian.

This change is relevant to PR 12004: with a memory limit in effect,
DrRacket will no longer crash on that example, although plain Racket
starts with no such limit and will still exhaust all memory.
Matthew Flatt 2011-09-05 10:01:46 -06:00
parent 50d07dc67b
commit 6d944453a7
11 changed files with 423 additions and 96 deletions
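
A minimal sketch of the new behavior (not part of this commit; the
allocation loop and the 64 MB figure are illustrative), modeled on the
runaway-place test added below: the place's memory use is charged to the
creating custodian, so the custodian limit shuts the place down instead
of letting it exhaust memory.

#lang racket/base
(require racket/place)
(provide main grow)
;; Allocate without bound inside a child place; with this commit, that
;; memory counts against the parent's custodian, whose limit kills the place.
(define (grow)
  (let loop ([l null])
    (loop (cons (make-bytes (* 1024 1024)) l))))
(define (main)
  (parameterize ([current-custodian (make-custodian)])
    (custodian-limit-memory (current-custodian) (* 1024 1024 64))
    (place-wait (place ch (grow)))))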

@ -1856,6 +1856,7 @@ path/s is either such a string or a list of them.
"collects/tests/racket/read.rktl" drdr:command-line #f
"collects/tests/racket/readtable.rktl" drdr:command-line (racket "-f" *)
"collects/tests/racket/restart.rktl" drdr:command-line (racket "-f" *)
"collects/tests/racket/runaway-place.rkt" drdr:command-line (racket "-tm" *)
"collects/tests/racket/runflats.rktl" drdr:command-line (racket "-f" *)
"collects/tests/racket/rx.rktl" drdr:command-line #f
"collects/tests/racket/sandbox.rktl" drdr:command-line (racket "-f" *)

@ -0,0 +1,16 @@
#lang racket/base
(require racket/place)
(provide main runaway)
(define (main)
(parameterize ([current-custodian (make-custodian)])
(custodian-limit-memory (current-custodian) (* 1024 1024 64))
(parameterize ([current-custodian (make-custodian)])
(place-wait (place ch (runaway))))))
(define (runaway)
(printf "starting\n")
(define p (place ch (runaway)))
(place-wait p))
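
For the other half of the change, a hedged sketch, not part of the
commit, of `current-memory-use' in the creating place now counting
memory retained by a child place (sizes are illustrative, and the
reported delta depends on when the child's GC propagates its use
upward):

#lang racket/base
(require racket/place)
(provide main)
(define (main)
  (define before (current-memory-use))
  (define p (place ch
              ;; retain ~32 MB, run a collection so this place reports its
              ;; memory use to the parent, then wait to be released
              (let ([held (make-bytes (* 32 1024 1024))])
                (collect-garbage)
                (place-channel-put ch (bytes-length held))
                (place-channel-get ch))))
  (place-channel-get p)   ; the child has allocated and reported
  (printf "delta: ~a bytes\n" (- (current-memory-use) before))
  (place-channel-put p 'done)
  (place-wait p))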

@ -33,7 +33,8 @@ typedef void (*GC_collect_start_callback_Proc)(void);
typedef void (*GC_collect_end_callback_Proc)(void);
typedef void (*GC_collect_inform_callback_Proc)(int master_gc, int major_gc,
intptr_t pre_used, intptr_t post_used,
intptr_t pre_admin, intptr_t post_admin);
intptr_t pre_admin, intptr_t post_admin,
intptr_t post_child_places_used);
typedef uintptr_t (*GC_get_thread_stack_base_Proc)(void);
typedef void (*GC_Post_Propagate_Hook_Proc)(struct NewGC *);
/*
@ -105,11 +106,13 @@ GC2_EXTERN void GC_register_root_custodian(void *);
GC2_EXTERN void GC_register_new_thread(void *, void *);
/*
Indicates that a just-allocated point is for a thread record
owned by a particular custodian. */
Indicates that a just-allocated point is for a thread
or place owned by a particular custodian. */
GC2_EXTERN void GC_register_thread(void *, void *);
/*
Indicates that a a thread record is owned by a particular custodian. */
Indicates that a thread or place is now owned by a
particular custodian. */
GC2_EXTERN GC_collect_start_callback_Proc GC_set_collect_start_callback(GC_collect_start_callback_Proc);
GC2_EXTERN GC_collect_end_callback_Proc GC_set_collect_end_callback(GC_collect_end_callback_Proc);
@ -150,6 +153,11 @@ GC2_EXTERN int GC_set_account_hook(int type, void *c1, uintptr_t b, void *c2);
Set a memory-accounting property. Returns 0 for failure (i.e., not
supported). */
GC2_EXTERN uintptr_t GC_get_account_memory_limit(void *c1);
/*
Returns a memory-accounting limit for c1 (or any ancestor),
or 0 if none is set. */
GC2_EXTERN void GC_gcollect(void);
/*
Performs an immediate (full) collection. */
@ -433,19 +441,33 @@ GC2_EXTERN void GC_write_barrier(void *p);
Explicit write barrier to ensure that a write-barrier signal is not
triggered by a memory write.
*/
GC2_EXTERN void GC_switch_out_master_gc();
/*
Makes the current GC the master GC.
Creates a new place specific GC and links it to the master GC.
*/
GC2_EXTERN void GC_construct_child_gc();
GC2_EXTERN struct NewGC *GC_get_current_instance();
/*
Creates a new place specific GC and links to the master GC.
Returns a representation of the current GC.
*/
GC2_EXTERN void GC_construct_child_gc(struct NewGC *parent_gc, intptr_t limit);
/*
Creates a new place-specific GC that is a child for memory-accounting
purposes of the given parent GC. If `limit' is not 0, sets the maximum
amount of memory the new GC is supposed to use.
*/
GC2_EXTERN intptr_t GC_propagate_hierarchy_memory_use();
/*
Notifies the parent GC (if any) of memory use by the current GC
and its children. The result is total memory use. */
GC2_EXTERN void GC_destruct_child_gc();
/*
Destroys a place specific GC once the place has finished.
Destroys a place-specific GC once the place has finished.
*/
GC2_EXTERN void *GC_switch_to_master_gc();

@ -12,6 +12,8 @@ static const int btc_redirect_custodian = 510;
static const int btc_redirect_ephemeron = 509;
static const int btc_redirect_cust_box = 508;
inline static void account_memory(NewGC *gc, int set, intptr_t amount);
/*****************************************************************************/
/* thread list */
/*****************************************************************************/
@ -23,7 +25,10 @@ inline static void BTC_register_new_thread(void *t, void *c)
GC_Thread_Info *work;
work = (GC_Thread_Info *)ofm_malloc(sizeof(GC_Thread_Info));
((Scheme_Thread *)t)->gc_info = work;
if (((Scheme_Object *)t)->type == scheme_thread_type)
((Scheme_Thread *)t)->gc_info = work;
else
((Scheme_Place *)t)->gc_info = work;
work->owner = current_owner(gc, (Scheme_Custodian *)c);
work->thread = t;
@ -35,8 +40,11 @@ inline static void BTC_register_thread(void *t, void *c)
{
NewGC *gc = GC_get_GC();
GC_Thread_Info *work;
work = ((Scheme_Thread *)t)->gc_info;
if (((Scheme_Object *)t)->type == scheme_thread_type)
work = ((Scheme_Thread *)t)->gc_info;
else
work = ((Scheme_Place *)t)->gc_info;
work->owner = current_owner(gc, (Scheme_Custodian *)c);
}
@ -45,15 +53,30 @@ inline static void mark_threads(NewGC *gc, int owner)
GC_Thread_Info *work;
Mark2_Proc thread_mark = gc->mark_table[btc_redirect_thread];
for(work = gc->thread_infos; work; work = work->next)
if(work->owner == owner) {
if (((Scheme_Thread *)work->thread)->running) {
thread_mark(work->thread, gc);
if (work->thread == scheme_current_thread) {
GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
for(work = gc->thread_infos; work; work = work->next) {
if (work->owner == owner) {
if (((Scheme_Object *)work->thread)->type == scheme_thread_type) {
/* thread */
if (((Scheme_Thread *)work->thread)->running) {
thread_mark(work->thread, gc);
if (work->thread == scheme_current_thread) {
GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
}
}
} else {
/* place */
/* add in the memory used by the place's GC */
intptr_t sz;
Scheme_Place_Object *place_obj = ((Scheme_Place *)work->thread)->place_obj;
if (place_obj) {
mzrt_mutex_lock(place_obj->lock);
sz = place_obj->memory_use;
mzrt_mutex_unlock(place_obj->lock);
account_memory(gc, owner, gcBYTES_TO_WORDS(sz));
}
}
}
}
}
inline static void clean_up_thread_list(NewGC *gc)
@ -355,10 +378,10 @@ inline static void BTC_initialize_mark_table(NewGC *gc) {
}
inline static int BTC_get_redirect_tag(NewGC *gc, int tag) {
if (tag == scheme_thread_type ) { tag = btc_redirect_thread; }
else if (tag == scheme_custodian_type ) { tag = btc_redirect_custodian; }
else if (tag == gc->ephemeron_tag ) { tag = btc_redirect_ephemeron; }
else if (tag == gc->cust_box_tag ) { tag = btc_redirect_cust_box; }
if (tag == scheme_thread_type) { tag = btc_redirect_thread; }
else if (tag == scheme_custodian_type) { tag = btc_redirect_custodian; }
else if (tag == gc->ephemeron_tag) { tag = btc_redirect_ephemeron; }
else if (tag == gc->cust_box_tag) { tag = btc_redirect_cust_box; }
return tag;
}
@ -535,7 +558,7 @@ inline static void BTC_run_account_hooks(NewGC *gc)
AccountHook *work = gc->hooks;
AccountHook *prev = NULL;
while(work) {
while (work) {
if( ((work->type == MZACCT_REQUIRE) &&
((gc->used_pages > (gc->max_pages_for_use / 2))
|| ((((gc->max_pages_for_use / 2) - gc->used_pages) * APAGE_SIZE)
@ -563,7 +586,7 @@ static uintptr_t custodian_single_time_limit(NewGC *gc, int set)
const int table_size = gc->owner_table_size;
if (!set)
return (uintptr_t)(intptr_t)-1;
return gc->place_memory_limit;
if (gc->reset_limits) {
int i;
@ -575,7 +598,7 @@ static uintptr_t custodian_single_time_limit(NewGC *gc, int set)
if (!owner_table[set]->limit_set) {
/* Check for limits on this custodian or one of its ancestors: */
uintptr_t limit = (uintptr_t)-1;
uintptr_t limit = gc->place_memory_limit;
Scheme_Custodian *orig = (Scheme_Custodian *) owner_table[set]->originator, *c;
AccountHook *work = gc->hooks;
@ -614,21 +637,39 @@ intptr_t BTC_get_memory_use(NewGC* gc, void *o)
return 0;
}
int BTC_single_allocation_limit(NewGC *gc, size_t sizeb) {
/* We're allowed to fail. Check for allocations that exceed a single-time
* limit. Otherwise, the limit doesn't work as intended, because
* a program can allocate a large block that nearly exhausts memory,
* and then a subsequent allocation can fail. As long as the limit
* is much smaller than the actual available memory, and as long as
* GC_out_of_memory protects any user-requested allocation whose size
* is independent of any existing object, then we can enforce the limit. */
int BTC_single_allocation_limit(NewGC *gc, size_t sizeb)
/* Use this function to check for allocations that exceed a single-time
* limit. Otherwise, the limit doesn't work as intended, because
* a program can allocate a large block that nearly exhausts memory,
* and then a subsequent allocation can fail. As long as the limit
* is much smaller than the actual available memory, and as long as
* GC_out_of_memory protects any user-requested allocation whose size
* is independent of any existing object, then we can enforce the limit. */
{
Scheme_Thread *p = scheme_current_thread;
if (p)
return (custodian_single_time_limit(gc, thread_get_owner(p)) < sizeb);
else
return 0;
return (gc->place_memory_limit < sizeb);
}
static uintptr_t BTC_get_account_hook(void *c1)
{
NewGC *gc = GC_get_GC();
uintptr_t mem;
if (!gc->really_doing_accounting)
return 0;
mem = custodian_single_time_limit(gc, custodian_to_owner_set(gc, c1));
if (mem == (uintptr_t)(intptr_t)-1)
return 0;
return mem;
}
static inline void BTC_clean_up(NewGC *gc) {
clean_up_thread_list(gc);
clean_up_owner_table(gc);

@ -5,9 +5,7 @@
Please see full copyright in the documentation
Search for "FIXME" for known improvement points
IF YOU'RE NOT ADAM (AND PROBABLY IF YOU ARE) READ THIS FIRST:
This is now a hybrid copying/mark-compact collector. The nursery
This is a hybrid copying/mark-compact collector. The nursery
(generation 0) is copied into the old generation (generation 1),
but the old generation compacts. This yields a nice combination
of performance, scalability and memory efficiency.
@ -889,20 +887,15 @@ static void *allocate_big(const size_t request_size_bytes, int type)
#ifdef NEWGC_BTC_ACCOUNT
if(GC_out_of_memory) {
#ifdef MZ_USE_PLACES
if (premaster_or_place_gc(gc)) {
if (premaster_or_place_gc(gc)) {
#endif
if (BTC_single_allocation_limit(gc, request_size_bytes)) {
/* We're allowed to fail. Check for allocations that exceed a single-time
limit. Otherwise, the limit doesn't work as intended, because
a program can allocate a large block that nearly exhausts memory,
and then a subsequent allocation can fail. As long as the limit
is much smaller than the actual available memory, and as long as
GC_out_of_memory protects any user-requested allocation whose size
is independent of any existing object, then we can enforce the limit. */
GC_out_of_memory();
}
if (BTC_single_allocation_limit(gc, request_size_bytes)) {
/* We're allowed to fail. Check for allocations that exceed a single-time
limit. See BTC_single_allocation_limit() for more information. */
GC_out_of_memory();
}
#ifdef MZ_USE_PLACES
}
}
#endif
}
#endif
@ -1666,7 +1659,7 @@ inline static void master_set_max_size(NewGC *gc)
inline static void reset_nursery(NewGC *gc)
{
uintptr_t new_gen0_size;
uintptr_t new_gen0_size;
new_gen0_size = NUM((GEN0_SIZE_FACTOR * (float)gc->memory_in_use) + GEN0_SIZE_ADDITION);
if(new_gen0_size > GEN0_MAX_SIZE)
new_gen0_size = GEN0_MAX_SIZE;
@ -2255,12 +2248,28 @@ int GC_set_account_hook(int type, void *c1, uintptr_t b, void *c2)
#endif
}
uintptr_t GC_get_account_memory_limit(void *c1)
{
#ifdef NEWGC_BTC_ACCOUNT
NewGC *gc = GC_get_GC();
uintptr_t v = BTC_get_account_hook(c1);
if (gc->place_memory_limit < (uintptr_t)(intptr_t)-1) {
if (!v || (gc->place_memory_limit < v))
return gc->place_memory_limit;
}
return v;
#else
return 0;
#endif
}
void GC_register_thread(void *t, void *c)
{
#ifdef NEWGC_BTC_ACCOUNT
BTC_register_thread(t, c);
#endif
}
void GC_register_new_thread(void *t, void *c)
{
#ifdef NEWGC_BTC_ACCOUNT
@ -2560,14 +2569,15 @@ void GC_set_put_external_event_fd(void *fd) {
}
#endif
static void NewGC_initialize(NewGC *newgc, NewGC *parentgc) {
if (parentgc) {
newgc->mark_table = parentgc->mark_table;
newgc->fixup_table = parentgc->fixup_table;
newgc->dumping_avoid_collection = parentgc->dumping_avoid_collection - 1;
}
else {
static void NewGC_initialize(NewGC *newgc, NewGC *inheritgc, NewGC *parentgc) {
if (inheritgc) {
newgc->mark_table = inheritgc->mark_table;
newgc->fixup_table = inheritgc->fixup_table;
newgc->dumping_avoid_collection = inheritgc->dumping_avoid_collection - 1;
#ifdef MZ_USE_PLACES
newgc->parent_gc = parentgc;
#endif
} else {
#ifdef MZ_USE_PLACES
NewGCMasterInfo_initialize();
#endif
@ -2595,10 +2605,17 @@ static void NewGC_initialize(NewGC *newgc, NewGC *parentgc) {
newgc->generations_available = 1;
newgc->last_full_mem_use = (20 * 1024 * 1024);
newgc->new_btc_mark = 1;
newgc->place_memory_limit = (uintptr_t)(intptr_t)-1;
#ifdef MZ_USE_PLACES
mzrt_mutex_create(&newgc->child_total_lock);
#endif
}
/* NOTE This method sets the constructed GC as the new Thread Specific GC. */
static NewGC *init_type_tags_worker(NewGC *parentgc, int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
static NewGC *init_type_tags_worker(NewGC *inheritgc, NewGC *parentgc,
int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
{
NewGC *gc;
@ -2617,7 +2634,7 @@ static NewGC *init_type_tags_worker(NewGC *parentgc, int count, int pair, int mu
gc->cust_box_tag = custbox;
# endif
NewGC_initialize(gc, parentgc);
NewGC_initialize(gc, inheritgc, parentgc);
/* Our best guess at what the OS will let us allocate: */
@ -2631,7 +2648,7 @@ static NewGC *init_type_tags_worker(NewGC *parentgc, int count, int pair, int mu
gc->gen0.page_alloc_size = GEN0_PAGE_SIZE;
resize_gen0(gc, GEN0_INITIAL_SIZE);
if (!parentgc) {
if (!inheritgc) {
GC_register_traversers2(gc->weak_box_tag, size_weak_box, mark_weak_box, fixup_weak_box, 0, 0);
GC_register_traversers2(gc->ephemeron_tag, size_ephemeron, mark_ephemeron, fixup_ephemeron, 0, 0);
GC_register_traversers2(gc->weak_array_tag, size_weak_array, mark_weak_array, fixup_weak_array, 0, 0);
@ -2647,22 +2664,27 @@ void GC_init_type_tags(int count, int pair, int mutable_pair, int weakbox, int e
{
static int initialized = 0;
if(!initialized) {
if (!initialized) {
initialized = 1;
init_type_tags_worker(NULL, count, pair, mutable_pair, weakbox, ephemeron, weakarray, custbox);
}
else {
init_type_tags_worker(NULL, NULL, count, pair, mutable_pair, weakbox, ephemeron, weakarray, custbox);
} else {
GCPRINT(GCOUTF, "GC_init_type_tags should only be called once!\n");
abort();
}
}
struct NewGC *GC_get_current_instance() {
return GC_get_GC();
}
#ifdef MZ_USE_PLACES
void GC_construct_child_gc() {
void GC_construct_child_gc(struct NewGC *parent_gc, intptr_t limit) {
NewGC *gc = MASTERGC;
NewGC *newgc = init_type_tags_worker(gc, 0, 0, 0, gc->weak_box_tag, gc->ephemeron_tag, gc->weak_array_tag, gc->cust_box_tag);
NewGC *newgc = init_type_tags_worker(gc, parent_gc, 0, 0, 0, gc->weak_box_tag, gc->ephemeron_tag, gc->weak_array_tag, gc->cust_box_tag);
newgc->primoridal_gc = MASTERGC;
newgc->dont_master_gc_until_child_registers = 1;
if (limit)
newgc->place_memory_limit = limit;
}
void GC_destruct_child_gc() {
@ -2729,7 +2751,7 @@ void GC_switch_out_master_gc() {
MASTERGC->dumping_avoid_collection++;
save_globals_to_gc(MASTERGC);
GC_construct_child_gc();
GC_construct_child_gc(NULL, 0);
GC_allow_master_gc_check();
}
else {
@ -2825,12 +2847,19 @@ void GC_register_traversers(short tag, Size_Proc size, Mark_Proc mark,
intptr_t GC_get_memory_use(void *o)
{
NewGC *gc = GC_get_GC();
intptr_t amt;
#ifdef NEWGC_BTC_ACCOUNT
if(o) {
return BTC_get_memory_use(gc, o);
}
#endif
return gen0_size_in_use(gc) + gc->memory_in_use;
amt = gen0_size_in_use(gc) + gc->memory_in_use;
#ifdef MZ_USE_PLACES
mzrt_mutex_lock(gc->child_total_lock);
amt += gc->child_gc_total;
mzrt_mutex_unlock(gc->child_total_lock);
#endif
return amt;
}
/*****************************************************************************/
@ -4526,7 +4555,8 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master, Log
#endif
gc->GC_collect_inform_callback(is_master, gc->gc_full,
old_mem_use + old_gen0, gc->memory_in_use,
old_mem_allocated, mmu_memory_allocated(gc->mmu));
old_mem_allocated, mmu_memory_allocated(gc->mmu),
gc->child_gc_total);
}
#ifdef MZ_USE_PLACES
if (lmi) {
@ -4537,6 +4567,7 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master, Log
lmi->pre_admin = old_mem_allocated;
lmi->post_admin = mmu_memory_allocated(gc->mmu);
}
GC_propagate_hierarchy_memory_use();
#endif
TIME_STEP("ended");
@ -4601,7 +4632,8 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master, Log
if (gc->GC_collect_inform_callback) {
gc->GC_collect_inform_callback(1, sub_lmi.full,
sub_lmi.pre_used, sub_lmi.post_used,
sub_lmi.pre_admin, sub_lmi.post_admin);
sub_lmi.pre_admin, sub_lmi.post_admin,
0);
}
}
}
@ -4609,6 +4641,25 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master, Log
#endif
}
intptr_t GC_propagate_hierarchy_memory_use()
{
NewGC *gc = GC_get_GC();
#ifdef MZ_USE_PLACES
if (gc->parent_gc) {
/* report memory use to parent */
intptr_t total = gc->memory_in_use + gc->child_gc_total;
intptr_t delta = total - gc->previously_reported_total;
mzrt_mutex_lock(gc->parent_gc->child_total_lock);
gc->parent_gc->child_gc_total += delta;
mzrt_mutex_unlock(gc->parent_gc->child_total_lock);
gc->previously_reported_total = total;
}
#endif
return gc->memory_in_use + gc->child_gc_total;
}
#if MZ_GC_BACKTRACE
static GC_get_type_name_proc stack_get_type_name;

@ -245,6 +245,15 @@ typedef struct NewGC {
Allocator *saved_allocator;
#ifdef MZ_USE_PLACES
struct NewGC *parent_gc; /* parent for the purpose of reporting memory use */
intptr_t previously_reported_total; /* how much we previously reported to the parent */
mzrt_mutex *child_total_lock; /* lock on `child_gc_total' */
#endif
intptr_t child_gc_total;
uintptr_t place_memory_limit; /* set to propagate a custodian limit from a parent place */
#if defined(GC_DEBUG_PAGES)
FILE *GCVERBOSEFH;
#endif

@ -122,6 +122,7 @@ typedef struct Thread_Local_Variables {
uintptr_t GC_gen0_alloc_page_ptr_;
uintptr_t GC_gen0_alloc_page_end_;
int GC_gen0_alloc_only_;
uintptr_t force_gc_for_place_accounting_;
void *bignum_cache_[BIGNUM_CACHE_SIZE];
int cache_count_;
struct Scheme_Hash_Table *toplevels_ht_;
@ -459,6 +460,7 @@ XFORM_GC_VARIABLE_STACK_THROUGH_THREAD_LOCAL;
#define GC_gen0_alloc_page_end XOA (scheme_get_thread_local_variables()->GC_gen0_alloc_page_end_)
#define GC_gen0_alloc_only XOA (scheme_get_thread_local_variables()->GC_gen0_alloc_only_)
#define GC_variable_stack XOA (scheme_get_thread_local_variables()->GC_variable_stack_)
#define force_gc_for_place_accounting XOA (scheme_get_thread_local_variables()->force_gc_for_place_accounting_)
#define bignum_cache XOA (scheme_get_thread_local_variables()->bignum_cache_)
#define cache_count XOA (scheme_get_thread_local_variables()->cache_count_)
#define toplevels_ht XOA (scheme_get_thread_local_variables()->toplevels_ht_)

@ -113,7 +113,6 @@ static Scheme_Object *rename_transformer_p(int argc, Scheme_Object *argv[]);
static void skip_certain_things(Scheme_Object *o, Scheme_Close_Custodian_Client *f, void *data);
Scheme_Env *scheme_engine_instance_init();
Scheme_Env *scheme_place_instance_init();
static Scheme_Env *place_instance_init(void *stack_base, int initial_main_os_thread);
#ifdef MZ_PRECISE_GC
@ -503,11 +502,11 @@ static Scheme_Env *place_instance_init(void *stack_base, int initial_main_os_thr
return env;
}
Scheme_Env *scheme_place_instance_init(void *stack_base) {
Scheme_Env *scheme_place_instance_init(void *stack_base, struct NewGC *parent_gc, intptr_t memory_limit) {
Scheme_Env *env;
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
int *signal_fd;
GC_construct_child_gc();
GC_construct_child_gc(parent_gc, memory_limit);
#endif
env = place_instance_init(stack_base, 0);
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)

@ -41,7 +41,8 @@ static int id_counter;
static mzrt_mutex *id_counter_mutex;
SHARED_OK mz_proc_thread *scheme_master_proc_thread;
THREAD_LOCAL_DECL(struct Scheme_Place_Object *place_object);
THREAD_LOCAL_DECL(static struct Scheme_Place_Object *place_object);
THREAD_LOCAL_DECL(static uintptr_t force_gc_for_place_accounting);
static Scheme_Object *scheme_place(int argc, Scheme_Object *args[]);
static Scheme_Object *place_wait(int argc, Scheme_Object *args[]);
static Scheme_Object *place_kill(int argc, Scheme_Object *args[]);
@ -169,6 +170,8 @@ typedef struct Place_Start_Data {
Scheme_Object *current_library_collection_paths;
mzrt_sema *ready; /* malloc'ed item */
struct Scheme_Place_Object *place_obj; /* malloc'ed item */
struct NewGC *parent_gc;
Scheme_Object *cust_limit;
} Place_Start_Data;
static void null_out_runtime_globals() {
@ -205,6 +208,14 @@ Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
Scheme_Object *collection_paths;
Scheme_Place_Object *place_obj;
mzrt_sema *ready;
struct NewGC *parent_gc;
Scheme_Custodian *cust;
intptr_t mem_limit;
/* To avoid runaway place creation, check for termination before continuing. */
scheme_thread_block(0.0);
parent_gc = GC_get_current_instance();
/* create place object */
place = MALLOC_ONE_TAGGED(Scheme_Place);
@ -217,13 +228,24 @@ Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
place_obj->parent_signal_handle = handle;
}
/* The use_factor partly determines how often a child place notifies
a parent place that it is using more memory. If the child
notified the parent every time its memory use increased, that
would probably be too often. But notifying every time the memory
use doubles isn't good enough, because a long chain of places
wouldn't alert parents often enough to limit total memory
use. Decreasing the factor for each generation means that the
alerts become more frequent as nesting gets deeper. */
place_obj->use_factor = (place_object ? (place_object->use_factor / 2) : 1.0);
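  /* Editorial illustration, not part of the commit: with the factors
     assigned above and the (1 + use_factor) threshold test in
     scheme_place_set_memory_use below, a place created from the original
     place re-notifies its parent when its use exceeds 2x the last report,
     that place's child at 1.5x, a grandchild at 1.25x, and so on, so
     deeper places report more often and growth in a long chain still
     reaches the top-level custodian's limit promptly. */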
mzrt_sema_create(&ready, 0);
/* pass critical info to new place */
place_data = MALLOC_ONE(Place_Start_Data);
place_data->ready = ready;
place_data->place_obj = place_obj;
place_data->parent_gc = parent_gc;
{
Scheme_Object *so;
@ -257,11 +279,17 @@ Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
collection_paths = places_deep_copy_to_master(collection_paths);
place_data->current_library_collection_paths = collection_paths;
cust = scheme_get_current_custodian();
mem_limit = GC_get_account_memory_limit(cust);
place_data->cust_limit = scheme_make_integer(mem_limit);
place_obj->memory_limit = mem_limit;
place_obj->parent_need_gc = &force_gc_for_place_accounting;
/* create new place */
proc_thread = mz_proc_thread_create(place_start_proc, place_data);
if (!proc_thread) {
mzrt_sema_destroy(ready);
mzrt_sema_destroy(ready);
scheme_signal_error("place: place creation failed");
}
@ -277,9 +305,7 @@ Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
place_data->place_obj = NULL;
{
Scheme_Custodian *cust;
Scheme_Custodian_Reference *mref;
cust = scheme_get_current_custodian();
mref = scheme_add_managed(NULL,
(Scheme_Object *)place,
cust_kill_place,
@ -288,6 +314,10 @@ Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
place->mref = mref;
}
#ifdef MZ_PRECISE_GC
GC_register_new_thread(place, cust);
#endif
return (Scheme_Object*) place;
}
@ -310,13 +340,16 @@ static void do_place_kill(Scheme_Place *place)
mzrt_mutex_unlock(place_obj->lock);
}
scheme_resume_one_place(place);
scheme_remove_managed(place->mref, (Scheme_Object *)place);
place->place_obj = NULL;
}
static int do_place_break(Scheme_Place *place) {
static int do_place_break(Scheme_Place *place)
{
Scheme_Place_Object *place_obj;
place_obj = (Scheme_Place_Object*) place->place_obj;
place_obj = place->place_obj;
{
mzrt_mutex_lock(place_obj->lock);
@ -332,7 +365,8 @@ static int do_place_break(Scheme_Place *place) {
return 0;
}
static void cust_kill_place(Scheme_Object *pl, void *notused) {
static void cust_kill_place(Scheme_Object *pl, void *notused)
{
do_place_kill((Scheme_Place *)pl);
}
@ -1734,28 +1768,121 @@ static void *place_start_proc(void *data_arg) {
return rc;
}
void scheme_pause_one_place(Scheme_Place *p)
{
Scheme_Place_Object *place_obj = p->place_obj;
if (place_obj) {
mzrt_mutex_lock(place_obj->lock);
if (!place_obj->pause) {
mzrt_sema *s;
mzrt_sema_create(&s, 0);
place_obj->pause = s;
}
mzrt_mutex_unlock(place_obj->lock);
}
}
void scheme_resume_one_place(Scheme_Place *p)
{
Scheme_Place_Object *place_obj = p->place_obj;
if (place_obj) {
mzrt_mutex_lock(place_obj->lock);
if (place_obj->pause) {
mzrt_sema *s = place_obj->pause;
place_obj->pause = NULL;
if (!place_obj->pausing) {
mzrt_sema_destroy(s);
} else {
mzrt_sema_post(s);
}
}
mzrt_mutex_unlock(place_obj->lock);
}
}
void scheme_place_check_for_interruption()
{
Scheme_Place_Object *place_obj;
char local_die;
char local_break;
mzrt_sema *local_pause;
place_obj = place_object;
if (!place_obj)
return;
while (1) {
mzrt_mutex_lock(place_obj->lock);
local_die = place_obj->die;
local_break = place_obj->pbreak;
local_pause = place_obj->pause;
place_obj->pbreak = 0;
if (local_pause)
place_obj->pausing = 1;
mzrt_mutex_unlock(place_obj->lock);
if (local_pause) {
scheme_pause_all_places();
mzrt_sema_wait(local_pause);
mzrt_sema_destroy(local_pause);
scheme_resume_all_places();
} else
break;
}
if (local_die)
scheme_kill_thread(scheme_main_thread);
if (local_break)
scheme_break_thread(NULL);
}
void scheme_place_set_memory_use(intptr_t mem_use)
{
Scheme_Place_Object *place_obj;
place_obj = place_object;
if (!place_obj)
return;
mzrt_mutex_lock(place_obj->lock);
local_die = place_obj->die;
local_break = place_obj->pbreak;
place_obj->pbreak = 0;
place_obj->memory_use = mem_use;
mzrt_mutex_unlock(place_obj->lock);
if (local_die)
scheme_kill_thread(scheme_main_thread);
if (local_break)
scheme_break_thread(NULL);
if (place_obj->parent_signal_handle && place_obj->memory_limit) {
if (mem_use > place_obj->memory_limit) {
/* tell the parent place to force a GC, and therefore check
custodian limits that will kill this place; pause this
place and its children to give the original place time
to kill this one */
scheme_pause_all_places();
mzrt_ensure_max_cas(place_obj->parent_need_gc, 1);
scheme_signal_received_at(place_obj->parent_signal_handle);
} else if (mem_use > (1 + place_obj->use_factor) * place_obj->prev_notify_memory_use) {
/* make sure the parent notices that we're using more memory: */
scheme_signal_received_at(place_obj->parent_signal_handle);
place_obj->prev_notify_memory_use = mem_use;
} else if (mem_use < place_obj->prev_notify_memory_use) {
place_obj->prev_notify_memory_use = mem_use;
}
}
}
void scheme_place_check_memory_use()
{
intptr_t m;
m = GC_propagate_hierarchy_memory_use();
scheme_place_set_memory_use(m);
if (force_gc_for_place_accounting) {
force_gc_for_place_accounting = 0;
scheme_collect_garbage();
scheme_resume_all_places();
}
}
static void place_set_result(Scheme_Object *result)
@ -1797,6 +1924,7 @@ static void *place_start_proc_after_stack(void *data_arg, void *stack_base) {
Scheme_Place_Object *place_obj;
Scheme_Object *place_main;
Scheme_Object *a[2], *channel;
intptr_t mem_limit;
mzrt_thread_id ptid;
ptid = mz_proc_thread_self();
@ -1812,8 +1940,10 @@ static void *place_start_proc_after_stack(void *data_arg, void *stack_base) {
scheme_current_place_id = ++id_counter;
mzrt_mutex_unlock(id_counter_mutex);
mem_limit = SCHEME_INT_VAL(place_data->cust_limit);
/* scheme_make_thread behaves differently if the above global vars are not null */
scheme_place_instance_init(stack_base);
scheme_place_instance_init(stack_base, place_data->parent_gc, mem_limit);
a[0] = scheme_places_deep_copy(place_data->current_library_collection_paths);
scheme_current_library_collection_paths(1, a);

@ -3669,19 +3669,29 @@ typedef struct Scheme_Place {
Scheme_Object *channel;
Scheme_Custodian_Reference *mref;
intptr_t result; /* set when place_obj becomes NULL */
#ifdef MZ_PRECISE_GC
struct GC_Thread_Info *gc_info; /* managed by the GC */
#endif
} Scheme_Place;
typedef struct Scheme_Place_Object {
Scheme_Object so;
#if defined(MZ_USE_PLACES)
mzrt_mutex *lock;
mzrt_sema *pause;
#endif
char die;
char pbreak;
char pausing;
void *signal_handle;
void *parent_signal_handle; /* set to NULL when the place terminates */
intptr_t result; /* initialized to 1, reset when parent_signal_handle becomes NULL */
/*Thread_Local_Variables *tlvs; */
intptr_t memory_use; /* set by inform hook on GC, used by GC for memory accounting */
intptr_t prev_notify_memory_use; /* if memory_use > use_factor * prev_notify_memory_use, alert parent */
double use_factor;
intptr_t memory_limit; /* custodian-based limit on the place's memory use */
uintptr_t *parent_need_gc; /* ptr to a variable in parent to force a GC (triggering accounting) */
} Scheme_Place_Object;
typedef struct Scheme_Serialized_File_FD{
@ -3714,11 +3724,18 @@ void scheme_socket_to_output_port(intptr_t s, Scheme_Object *name, int takeover,
#define SCHEME_PLACE_OBJECTP(o) (SCHEME_TYPE(o) == scheme_place_object_type)
Scheme_Env *scheme_place_instance_init();
Scheme_Env *scheme_place_instance_init(void *stack_base, struct NewGC *, intptr_t memory_limit);
Scheme_Object *scheme_make_place_object();
void scheme_place_instance_destroy(int force);
void scheme_kill_green_thread_timer();
void scheme_place_check_for_interruption();
void scheme_check_place_port_ok();
void scheme_place_set_memory_use(intptr_t amt);
void scheme_place_check_memory_use();
void scheme_pause_all_places();
void scheme_pause_one_place(Scheme_Place *p);
void scheme_resume_all_places();
void scheme_resume_one_place(Scheme_Place *p);
#endif /* __mzscheme_private__ */

@ -198,7 +198,7 @@ static void get_ready_for_GC(void);
static void done_with_GC(void);
#ifdef MZ_PRECISE_GC
static void inform_GC(int master_gc, int major_gc, intptr_t pre_used, intptr_t post_used,
intptr_t pre_admin, intptr_t post_admin);
intptr_t pre_admin, intptr_t post_admin, intptr_t post_child_places_used);
#endif
THREAD_LOCAL_DECL(static volatile short delayed_break_ready);
@ -988,7 +988,9 @@ static void adjust_custodian_family(void *mgr, void *skip_move)
o = WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
if (o)
GC_register_thread(o, parent);
}
} else if (SAME_TYPE(SCHEME_TYPE(o), scheme_place_type)) {
GC_register_thread(o, parent);
}
}
#endif
}
@ -1454,6 +1456,30 @@ static Scheme_Object *extract_thread(Scheme_Object *o)
return (Scheme_Object *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
}
static void pause_place(Scheme_Object *o)
{
#ifdef MZ_USE_PLACES
scheme_pause_one_place((Scheme_Place *)o);
#endif
}
void scheme_pause_all_places()
{
for_each_managed(scheme_place_type, pause_place);
}
static void resume_place(Scheme_Object *o)
{
#ifdef MZ_USE_PLACES
scheme_resume_one_place((Scheme_Place *)o);
#endif
}
void scheme_resume_all_places()
{
for_each_managed(scheme_place_type, resume_place);
}
void scheme_init_custodian_extractors()
{
if (!extractors) {
@ -4196,6 +4222,13 @@ void scheme_thread_block(float sleep_time)
if (!do_atomic)
scheme_place_check_for_interruption();
#endif
/* Propagate memory-use information and check for custodian-based
GC triggers due to child place memory use: */
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
scheme_place_check_memory_use();
check_scheduled_kills();
#endif
if (sleep_end > 0) {
if (sleep_end > scheme_get_inexact_milliseconds()) {
@ -7695,7 +7728,8 @@ static char *gc_num(char *nums, int v)
static void inform_GC(int master_gc, int major_gc,
intptr_t pre_used, intptr_t post_used,
intptr_t pre_admin, intptr_t post_admin)
intptr_t pre_admin, intptr_t post_admin,
intptr_t post_child_places_used)
{
Scheme_Logger *logger = scheme_get_main_logger();
if (logger) {
@ -7731,6 +7765,11 @@ static void inform_GC(int master_gc, int major_gc,
scheme_log_message(logger, SCHEME_LOG_DEBUG, buf, buflen, NULL);
}
#ifdef MZ_USE_PLACES
if (!master_gc) {
scheme_place_set_memory_use(post_used + post_child_places_used);
}
#endif
}
#endif
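
The GC log line built by inform_GC above is also visible from Racket
code through a log receiver; a hedged sketch (the "GC" message prefix is
an assumption about the format, while the debug level comes from the
scheme_log_message call shown in the diff):

#lang racket/base
;; Listen for the debug-level messages that inform_GC logs after each
;; collection and print the ones that look like GC reports.
(define r (make-log-receiver (current-logger) 'debug))
(void
 (thread
  (lambda ()
    (let loop ()
      (define v (sync r))
      (when (regexp-match? #rx"GC" (vector-ref v 1))
        (printf "~a\n" (vector-ref v 1)))
      (loop)))))
(collect-garbage)
(sleep 0.1)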