fix blocking operations during a try-atomic

and remove old delim-cont support used by gr1
Matthew Flatt 2010-11-30 15:07:24 -07:00
parent 7efcf80856
commit ca8b32725e
9 changed files with 138 additions and 371 deletions
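The heart of the change is the atomic-timeout hook's signature: the callback now receives a must_give_up flag, so the runtime can tell the handler that it has to stop (because a blocking operation is about to happen) rather than merely that it may run a little longer. Below is a minimal sketch of an embedder-side callback under the new Scheme_On_Atomic_Timeout_Proc typedef from this commit; the handler name and body are illustrative, not part of the commit:

#include "scheme.h"

/* Sketch: adapting an embedder's atomic-timeout hook to the new
   signature. must_give_up is nonzero when the runtime is about to
   block or suspend, so any pending atomic work must wrap up now. */
static void on_atomic_timeout(int must_give_up)
{
  if (must_give_up) {
    /* capture or abandon the remaining work and return promptly */
  } else {
    /* run one more bounded slice of work, as before */
  }
}

static Scheme_On_Atomic_Timeout_Proc old_hook;

static void install_hook(void) /* hypothetical helper */
{
  /* the setter returns the previously installed hook */
  old_hook = scheme_set_on_atomic_timeout(on_atomic_timeout);
}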


@@ -11,7 +11,7 @@
 (define scheme_call_with_composable_no_dws
   (get-ffi-obj 'scheme_call_with_composable_no_dws #f (_fun _scheme _scheme -> _scheme)))
 (define scheme_set_on_atomic_timeout
-  (get-ffi-obj 'scheme_set_on_atomic_timeout #f (_fun (_fun -> _void) -> _pointer)))
+  (get-ffi-obj 'scheme_set_on_atomic_timeout #f (_fun (_fun _int -> _void) -> _pointer)))
 (define scheme_restore_on_atomic_timeout
   (get-ffi-obj 'scheme_set_on_atomic_timeout #f (_fun _pointer -> _pointer)))
@@ -59,8 +59,10 @@
         [else
          ;; try to do some work:
          (let* ([ready? #f]
-                [handler (lambda ()
-                           (when (and ready? (should-give-up?))
+                [handler (lambda (must-give-up)
+                           (when (and ready?
+                                      (or (positive? must-give-up)
+                                          (should-give-up?)))
                              (scheme_call_with_composable_no_dws
                               (lambda (proc)
                                 (set-box! b (cons proc (unbox b)))


@@ -1710,7 +1710,7 @@ extern void *scheme_malloc_envunbox(size_t);
 /* embedding configuration and hooks */
 /*========================================================================*/
 
-typedef void (*Scheme_On_Atomic_Timeout_Proc)(void);
+typedef void (*Scheme_On_Atomic_Timeout_Proc)(int must_give_up);
 
 #if SCHEME_DIRECT_EMBEDDED


@@ -4550,39 +4550,6 @@ static int mark_thread_cell_FIXUP(void *p, struct NewGC *gc) {
 #define mark_thread_cell_IS_CONST_SIZE 1
 
-static int mark_frozen_tramp_SIZE(void *p, struct NewGC *gc) {
-  return
-    gcBYTES_TO_WORDS(sizeof(FrozenTramp));
-}
-
-static int mark_frozen_tramp_MARK(void *p, struct NewGC *gc) {
-  FrozenTramp *f = (FrozenTramp *)p;
-
-  gcMARK2(f->do_data, gc);
-  gcMARK2(f->old_param, gc);
-  gcMARK2(f->config, gc);
-  gcMARK2(f->progress_cont, gc);
-
-  return
-    gcBYTES_TO_WORDS(sizeof(FrozenTramp));
-}
-
-static int mark_frozen_tramp_FIXUP(void *p, struct NewGC *gc) {
-  FrozenTramp *f = (FrozenTramp *)p;
-
-  gcFIXUP2(f->do_data, gc);
-  gcFIXUP2(f->old_param, gc);
-  gcFIXUP2(f->config, gc);
-  gcFIXUP2(f->progress_cont, gc);
-
-  return
-    gcBYTES_TO_WORDS(sizeof(FrozenTramp));
-}
-
-#define mark_frozen_tramp_IS_ATOMIC 0
-#define mark_frozen_tramp_IS_CONST_SIZE 1
-
 #endif /* THREAD */
 
 /**********************************************************************/


@@ -1852,19 +1852,6 @@ mark_thread_cell {
  size:
   gcBYTES_TO_WORDS(sizeof(Thread_Cell));
 }
 
-mark_frozen_tramp {
- mark:
-  FrozenTramp *f = (FrozenTramp *)p;
-  gcMARK2(f->do_data, gc);
-  gcMARK2(f->old_param, gc);
-  gcMARK2(f->config, gc);
-  gcMARK2(f->progress_cont, gc);
- size:
-  gcBYTES_TO_WORDS(sizeof(FrozenTramp));
-}
-
 END thread;
 
 /**********************************************************************/


@@ -463,6 +463,8 @@ void scheme_suspend_remembered_threads(void);
 void scheme_resume_remembered_threads(void);
 #endif
 
+int scheme_wait_until_suspend_ok(void);
+
 #ifdef MZ_USE_MZRT
 extern void scheme_check_foreign_work(void);
 #endif


@@ -13,12 +13,12 @@
    consistently.)
 */
 
-#define MZSCHEME_VERSION "5.0.99.3"
+#define MZSCHEME_VERSION "5.0.99.4"
 
 #define MZSCHEME_VERSION_X 5
 #define MZSCHEME_VERSION_Y 0
 #define MZSCHEME_VERSION_Z 99
-#define MZSCHEME_VERSION_W 3
+#define MZSCHEME_VERSION_W 4
 
 #define MZSCHEME_VERSION_MAJOR ((MZSCHEME_VERSION_X * 100) + MZSCHEME_VERSION_Y)
 #define MZSCHEME_VERSION_MINOR ((MZSCHEME_VERSION_Z * 1000) + MZSCHEME_VERSION_W)


@@ -25,6 +25,7 @@
 READ_ONLY Scheme_Object *scheme_always_ready_evt;
 THREAD_LOCAL_DECL(Scheme_Object *scheme_system_idle_channel);
 
+extern int scheme_assert_atomic;
 static Scheme_Object *make_sema(int n, Scheme_Object **p);
 static Scheme_Object *semap(int n, Scheme_Object **p);
@@ -93,7 +94,7 @@ void scheme_init_sema(Scheme_Env *env)
   scheme_add_global_constant("make-semaphore",
                              scheme_make_prim_w_arity(make_sema,
                                                       "make-semaphore",
-                                                      0, 1),
+                                                      0, 2),
                              env);
   scheme_add_global_constant("semaphore?",
                              scheme_make_folding_prim(semap,
@@ -226,6 +227,7 @@ Scheme_Object *scheme_make_sema(long v)
 static Scheme_Object *make_sema(int n, Scheme_Object **p)
 {
   long v;
+  Scheme_Object *s;
 
   if (n) {
     if (!SCHEME_INTP(p[0])) {
@@ -242,7 +244,12 @@ static Scheme_Object *make_sema(int n, Scheme_Object **p)
   } else
     v = 0;
 
-  return scheme_make_sema(v);
+  s = scheme_make_sema(v);
+
+  if (n > 1)
+    SCHEME_CPTR_FLAGS(s) |= 0x1;
+
+  return s;
 }
 
 static Scheme_Object *make_sema_repost(int n, Scheme_Object **p)
@@ -315,6 +322,10 @@ void scheme_post_sema(Scheme_Object *o)
       } else
         consumed = 0;
 
+      if (!consumed)
+        if (SCHEME_CPTR_FLAGS(o) & 0x1)
+          printf("here\n");
+
       w->in_line = 0;
       w->prev = NULL;
       w->next = NULL;
@@ -633,26 +644,47 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
   } else
     start_pos = 0;
 
+  scheme_assert_atomic++;
+
   /* Initial poll */
-  i = 0;
-  for (ii = 0; ii < n; ii++) {
-    /* Randomized start position for poll ensures fairness: */
-    i = (start_pos + ii) % n;
-
-    if (semas[i]->so.type == scheme_sema_type) {
-      if (semas[i]->value) {
-        if ((semas[i]->value > 0) && (!syncing || !syncing->reposts || !syncing->reposts[i]))
-          --semas[i]->value;
-        if (syncing && syncing->accepts && syncing->accepts[i])
-          scheme_accept_sync(syncing, i);
-        break;
-      }
-    } else if (semas[i]->so.type == scheme_never_evt_type) {
-      /* Never ready. */
-    } else if (semas[i]->so.type == scheme_channel_syncer_type) {
-      /* Probably no need to poll */
-    } else if (try_channel(semas[i], syncing, i, NULL))
-      break;
+  while (1) {
+    i = 0;
+    for (ii = 0; ii < n; ii++) {
+      /* Randomized start position for poll ensures fairness: */
+      i = (start_pos + ii) % n;
+
+      if (semas[i]->so.type == scheme_sema_type) {
+        if (semas[i]->value) {
+          if ((semas[i]->value > 0) && (!syncing || !syncing->reposts || !syncing->reposts[i]))
+            --semas[i]->value;
+          if (syncing && syncing->accepts && syncing->accepts[i])
+            scheme_accept_sync(syncing, i);
+          break;
+        }
+      } else if (semas[i]->so.type == scheme_never_evt_type) {
+        /* Never ready. */
+      } else if (semas[i]->so.type == scheme_channel_syncer_type) {
+        /* Probably no need to poll */
+      } else if (try_channel(semas[i], syncing, i, NULL))
+        break;
+    }
+
+    if (ii >= n) {
+      if (!scheme_current_thread->next)
+        break;
+      else {
+        --scheme_assert_atomic;
+        if (!scheme_wait_until_suspend_ok()) {
+          scheme_assert_atomic++;
+          break;
+        } else {
+          /* there may have been some action on one of the waitables;
+             try again */
+          scheme_assert_atomic++;
+        }
+      }
+    } else
+      break;
   }
 
   /* In the following, syncers get changed back to channels,
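Stripped to its skeleton, the rewritten initial poll above turns a one-shot scan into a retry loop: if nothing is ready and the thread is allowed to block, the code steps out of the asserted-atomic region, flushes any pending atomic-timeout work via the new scheme_wait_until_suspend_ok(), and re-polls, since that work may have posted one of the waitables. A condensed sketch of that control flow, where poll_all() is a hypothetical stand-in for the randomized scan over the semaphores:

/* Sketch of the new control flow in scheme_wait_semas_chs(). */
scheme_assert_atomic++;            /* a thread swap here would be a bug */
while (1) {
  if (poll_all())                  /* hypothetical: nonzero if some sema was taken */
    break;
  if (!scheme_current_thread->next)
    break;                         /* main thread: fall through to the blocking path */
  --scheme_assert_atomic;
  if (!scheme_wait_until_suspend_ok()) {
    scheme_assert_atomic++;        /* nothing ran; the poll result still stands */
    break;
  }
  scheme_assert_atomic++;          /* timeout work ran; poll again */
}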
@@ -700,7 +732,9 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
         scheme_main_was_once_suspended = 0;
 
+        scheme_assert_atomic--;
         scheme_block_until(out_of_line, NULL, (Scheme_Object *)a, (float)0.0);
+        scheme_assert_atomic++;
 
         --scheme_current_thread->suspend_break;
       } else {
@@ -710,7 +744,9 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
         old_nkc = (scheme_current_thread->running & MZTHREAD_NEED_KILL_CLEANUP);
         if (!old_nkc)
           scheme_current_thread->running += MZTHREAD_NEED_KILL_CLEANUP;
+        scheme_assert_atomic--;
         scheme_weak_suspend_thread(scheme_current_thread);
+        scheme_assert_atomic++;
         if (!old_nkc && (scheme_current_thread->running & MZTHREAD_NEED_KILL_CLEANUP))
           scheme_current_thread->running -= MZTHREAD_NEED_KILL_CLEANUP;
       }
@@ -758,7 +794,9 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
           get_outof_line(semas[i], ws[i]);
       }
 
+      scheme_assert_atomic--;
       scheme_thread_block(0); /* ok if it returns multiple times */
+      scheme_assert_atomic++;
       scheme_current_thread->ran_some = 1;
       /* [but why would it return multiple times?! there must have been a reason...] */
     } else {
@@ -800,6 +838,8 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
         }
       }
 
+      scheme_assert_atomic--;
+
       if (i == -1) {
         scheme_thread_block(0); /* dies or suspends */
         scheme_current_thread->ran_some = 1;
@@ -807,6 +847,8 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
 
       if (i < n)
         break;
+
+      scheme_assert_atomic++;
     }
 
     /* Otherwise: !syncing and someone stole the post, or we were
@@ -837,6 +879,7 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
           get_outof_line(semas[j], ws[j]);
       }
 
+      scheme_assert_atomic--;
       break;
     }
@@ -861,7 +904,8 @@ int scheme_wait_semas_chs(int n, Scheme_Object **o, int just_try, Syncing *syncing)
       }
       /* Back to top of loop to sync again */
     }
-  }
+  } else
+    scheme_assert_atomic--;
 
   v = i + 1;
 }


@@ -257,8 +257,7 @@ enum {
   scheme_rt_sfs_info, /* 233 */
   scheme_rt_validate_clearing, /* 234 */
   scheme_rt_rb_node, /* 235 */
-  scheme_rt_frozen_tramp, /* 236 */
-  scheme_rt_lightweight_cont, /* 237 */
+  scheme_rt_lightweight_cont, /* 236 */
 #endif


@@ -128,6 +128,8 @@ extern int scheme_jit_malloced;
 # define scheme_jit_malloced 0
 #endif
 
+int scheme_assert_atomic;
+
 /*========================================================================*/
 /* local variables and prototypes */
 /*========================================================================*/
@@ -206,7 +208,7 @@ HOOK_SHARED_OK void (*scheme_sleep)(float seconds, void *fds);
 HOOK_SHARED_OK void (*scheme_notify_multithread)(int on);
 HOOK_SHARED_OK void (*scheme_wakeup_on_input)(void *fds);
 HOOK_SHARED_OK int (*scheme_check_for_break)(void);
-HOOK_SHARED_OK void (*scheme_on_atomic_timeout)(void);
+HOOK_SHARED_OK Scheme_On_Atomic_Timeout_Proc scheme_on_atomic_timeout;
 HOOK_SHARED_OK static int atomic_timeout_auto_suspend;
 HOOK_SHARED_OK static int atomic_timeout_atomic_level;
@@ -214,7 +216,6 @@ THREAD_LOCAL_DECL(struct Scheme_GC_Pre_Post_Callback_Desc *gc_prepost_callback_descs);
 ROSYM static Scheme_Object *read_symbol, *write_symbol, *execute_symbol, *delete_symbol, *exists_symbol;
 ROSYM static Scheme_Object *client_symbol, *server_symbol;
-ROSYM static Scheme_Object *froz_key;
 
 THREAD_LOCAL_DECL(static int do_atomic = 0);
 THREAD_LOCAL_DECL(static int missed_context_switch = 0);
@@ -380,7 +381,6 @@ static void make_initial_config(Scheme_Thread *p);
 static int do_kill_thread(Scheme_Thread *p);
 static void suspend_thread(Scheme_Thread *p);
-static void wait_until_suspend_ok(int for_stack);
 
 static int check_sleep(int need_activity, int sleep_now);
@@ -471,9 +471,6 @@ void scheme_init_thread(Scheme_Env *env)
   client_symbol = scheme_intern_symbol("client");
   server_symbol = scheme_intern_symbol("server");
 
-  REGISTER_SO(froz_key);
-  froz_key = scheme_make_symbol("frozen"); /* uninterned */
-
   scheme_add_global_constant("dump-memory-stats",
                              scheme_make_prim_w_arity(scheme_dump_gc_stats,
                                                       "dump-memory-stats",
@@ -2627,6 +2624,9 @@ static void do_swap_thread()
   swapping = 1;
 #endif
 
+  if (scheme_assert_atomic)
+    *(long *)0x0 = 1;
+
   if (!swap_no_setjmp && SETJMP(scheme_current_thread)) {
     /* We're back! */
     /* See also initial swap in in start_child() */
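scheme_assert_atomic is a debugging tripwire rather than a lock: the counter is raised around regions that must not see a thread swap, and the deliberate null-pointer write above crashes at the exact offending swap instead of letting a mid-poll swap corrupt state silently. An illustrative restatement of the same intent in conventional terms (not the commit's code):

#include <assert.h>

extern int scheme_assert_atomic;

static void check_no_swap(void) /* hypothetical helper */
{
  /* same intent as the guard in do_swap_thread() */
  assert(!scheme_assert_atomic); /* no swaps inside asserted-atomic regions */
}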
@@ -3311,10 +3311,6 @@ Scheme_Object *scheme_thread_w_details(Scheme_Object *thunk,
   if (scheme_is_stack_too_shallow()) {
     Scheme_Thread *p = scheme_current_thread;
 
-    /* Don't mangle the stack if we're in atomic mode, because that
-       probably means a stack-freeze trampoline, etc. */
-    wait_until_suspend_ok(1);
-
     p->ku.k.p1 = thunk;
     p->ku.k.p2 = config;
     p->ku.k.p3 = mgr;
@@ -3379,7 +3375,7 @@ Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom)
   SCHEME_USE_FUEL(25);
 
-  wait_until_suspend_ok(0);
+  scheme_wait_until_suspend_ok();
 
   np = MALLOC_ONE_TAGGED(Scheme_Thread);
   np->so.type = scheme_thread_type;
@@ -4051,6 +4047,43 @@ void scheme_break_thread(Scheme_Thread *p)
 # endif
 }
 
+static void call_on_atomic_timeout(int must)
+{
+  Scheme_Thread *p = scheme_current_thread;
+  int running;
+  double sleep_end;
+  int block_descriptor;
+  Scheme_Object *blocker;
+  Scheme_Ready_Fun block_check;
+  Scheme_Needs_Wakeup_Fun block_needs_wakeup;
+
+  /* Save any state that has to do with the thread blocking or
+     sleeping, in case scheme_on_atomic_timeout() runs Racket code. */
+
+  running = p->running;
+  sleep_end = p->sleep_end;
+  block_descriptor = p->block_descriptor;
+  blocker = p->blocker;
+  block_check = p->block_check;
+  block_needs_wakeup = p->block_needs_wakeup;
+
+  p->running = MZTHREAD_RUNNING;
+  p->sleep_end = 0.0;
+  p->block_descriptor = 0;
+  p->blocker = NULL;
+  p->block_check = NULL;
+  p->block_needs_wakeup = NULL;
+
+  scheme_on_atomic_timeout(must);
+
+  p->running = running;
+  p->sleep_end = sleep_end;
+  p->block_descriptor = block_descriptor;
+  p->blocker = blocker;
+  p->block_check = block_check;
+  p->block_needs_wakeup = block_needs_wakeup;
+}
+
 static void find_next_thread(Scheme_Thread **return_arg) {
   Scheme_Thread *next;
   Scheme_Thread *p = scheme_current_thread;
@@ -4212,7 +4245,7 @@ void scheme_thread_block(float sleep_time)
     if ((p->running & MZTHREAD_USER_SUSPENDED)
         && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
       /* This thread was suspended. */
-      wait_until_suspend_ok(0);
+      scheme_wait_until_suspend_ok();
       if (!p->next) {
         /* Suspending the main thread... */
         select_thread();
@@ -4311,9 +4344,9 @@ void scheme_thread_block(float sleep_time)
   }
 #endif
 
   /*####################################*/
   /* THREAD CONTEXT SWITCH HAPPENS HERE */
   /*####################################*/
 
   if (next) {
     /* Swap in `next', but first clear references to other threads. */
@@ -4329,7 +4362,7 @@ void scheme_thread_block(float sleep_time)
       scheme_fuel_counter = p->engine_weight;
       scheme_jit_stack_boundary = scheme_stack_boundary;
     }
-    scheme_on_atomic_timeout();
+    call_on_atomic_timeout(0);
     if (atomic_timeout_auto_suspend > 1)
       --atomic_timeout_auto_suspend;
   }
@@ -4360,7 +4393,7 @@ void scheme_thread_block(float sleep_time)
   /* Suspended while I was asleep? */
   if ((p->running & MZTHREAD_USER_SUSPENDED)
       && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
-    wait_until_suspend_ok(0);
+    scheme_wait_until_suspend_ok();
     if (!p->next)
       scheme_thread_block(0.0); /* main thread handled at top of this function */
     else
@@ -4592,22 +4625,24 @@ void scheme_end_atomic_can_break(void)
     scheme_check_break_now();
 }
 
-static void wait_until_suspend_ok(int for_stack)
+int scheme_wait_until_suspend_ok(void)
 {
-  if (scheme_on_atomic_timeout && atomic_timeout_auto_suspend) {
+  int did = 0;
+
+  if (scheme_on_atomic_timeout) {
     /* new-style atomic timeout */
-    if (for_stack) {
-      /* a stack overflow is ok for the new-style timeout */
-      return;
-    } else if (do_atomic > atomic_timeout_atomic_level) {
+    if (do_atomic > atomic_timeout_atomic_level) {
       scheme_log_abort("attempted to wait for suspend in nested atomic mode");
      abort();
     }
   }
 
   while (do_atomic && scheme_on_atomic_timeout) {
-    scheme_on_atomic_timeout();
+    did = 1;
+    call_on_atomic_timeout(1);
   }
+
+  return did;
 }
 
 Scheme_On_Atomic_Timeout_Proc scheme_set_on_atomic_timeout(Scheme_On_Atomic_Timeout_Proc p)
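The new return value reports whether any frozen work actually ran, which is what lets the semaphore poll in sema.c decide between trusting its poll and redoing it. The caller pattern, as a sketch:

/* 0: no atomic-timeout callback ran, so state observed before the
   call still holds; 1: frozen work was flushed and may have changed
   the waitables, so any poll must be redone. */
if (!scheme_wait_until_suspend_ok()) {
  /* safe to continue on the earlier poll */
} else {
  /* re-check the waitables before blocking */
}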
@@ -4631,10 +4666,6 @@ void scheme_weak_suspend_thread(Scheme_Thread *r)
   if (r->running & MZTHREAD_SUSPENDED)
     return;
 
-  if (r == scheme_current_thread) {
-    wait_until_suspend_ok(0);
-  }
-
   if (r->prev) {
     r->prev->next = r->next;
     r->next->prev = r->prev;
@@ -4679,7 +4710,6 @@ void scheme_weak_resume_thread(Scheme_Thread *r)
 void scheme_about_to_move_C_stack(void)
 {
-  wait_until_suspend_ok(1);
 }
 
 static Scheme_Object *
@@ -4791,7 +4821,7 @@ void scheme_kill_thread(Scheme_Thread *p)
 {
   if (do_kill_thread(p)) {
     /* Suspend/kill self: */
-    wait_until_suspend_ok(0);
+    scheme_wait_until_suspend_ok();
     if (p->suspend_to_kill)
       suspend_thread(p);
     else
@@ -4921,7 +4951,7 @@ static void suspend_thread(Scheme_Thread *p)
     p->running |= MZTHREAD_USER_SUSPENDED;
   } else {
     if (p == scheme_current_thread) {
-      wait_until_suspend_ok(0);
+      scheme_wait_until_suspend_ok();
     }
     p->running |= MZTHREAD_USER_SUSPENDED;
     scheme_weak_suspend_thread(p); /* ok if p is scheme_current_thread */
@@ -8081,269 +8111,6 @@ void scheme_free_gmp(void *p, void **mem_pool)
     *mem_pool = SCHEME_CDR(*mem_pool);
 }
 
-/*========================================================================*/
-/* stack freezer */
-/*========================================================================*/
-
-/* When interacting with certain libraries that can lead to Scheme
-   callbacks, the stack region used by the library should not be
-   modified by Scheme thread swaps. In that case, the callback must be
-   constrained. Completely disallowing synchornization with ther
-   threads or unbounded computation, however, is sometimes too
-   difficult. A stack-freezer sequence offer a compromise, where the
-   callback is run as much as possible, but it can be suspended to
-   allow the library call to return so that normal Scheme-thread
-   scheduling can resume. The callback is then completed in a normal
-   scheduling context, where it is no longer specially constrained.
-
-   The call process is
-       scheme_with_stack_freeze(f, data)
-       -> f(data) in frozen mode
-          -> ... frozen_run_some(g, data2)         \
-             -> Scheme code, may finish or may not | maybe loop
-          froz->in_progress inicates whether done  /
-       -> continue scheme if not finished
-
-   In this process, it's the call stack between f(data) and the call
-   to frozen_run_some() that won't be copied in or out until f(data)
-   returns.
-
-   Nesting scheme_with_stack_freeze() calls should be safe, but it
-   won't achieve the goal, which is to limit the amount of work done
-   before returning (because the inner scheme_with_stack_freeze() will
-   have to run to completion). */
-
-static unsigned long get_deeper_base();
-
-typedef struct FrozenTramp {
-  MZTAG_IF_REQUIRED
-  Scheme_Frozen_Stack_Proc do_f;
-  void *do_data;
-  int val;
-  int in_progress;
-  int progress_is_resumed;
-  Scheme_Object *old_param;
-  Scheme_Config *config;
-  void *progress_base_addr;
-  mz_jmp_buf progress_base;
-  Scheme_Jumpup_Buf_Holder *progress_cont;
-  int timer_on;
-  double continue_until;
-#ifdef MZ_PRECISE_GC
-  void *fixup_var_stack_chain;
-#endif
-} FrozenTramp;
-
-int scheme_with_stack_freeze(Scheme_Frozen_Stack_Proc wha_f, void *wha_data)
-{
-  FrozenTramp *froz;
-  Scheme_Cont_Frame_Data cframe;
-  Scheme_Object *bx;
-  int retval;
-  Scheme_Jumpup_Buf_Holder *pc;
-
-  froz = MALLOC_ONE_RT(FrozenTramp);
-  SET_REQUIRED_TAG(froz->type = scheme_rt_frozen_tramp);
-
-  bx = scheme_make_raw_pair((Scheme_Object *)froz, NULL);
-
-  scheme_push_continuation_frame(&cframe);
-  scheme_set_cont_mark(froz_key, bx);
-
-  pc = scheme_new_jmpupbuf_holder();
-  froz->progress_cont = pc;
-
-  scheme_init_jmpup_buf(&froz->progress_cont->buf);
-
-  scheme_start_atomic();
-  retval = wha_f(wha_data);
-  froz->val = retval;
-
-  if (froz->in_progress) {
-    /* We have leftover work; jump and finish it (non-atomically).
-       But don't swap until we've jumped back in, because the jump-in
-       point might be trying to suspend the thread (and that should
-       complete before any swap). */
-    scheme_end_atomic_no_swap();
-    SCHEME_CAR(bx) = NULL;
-    froz->in_progress = 0;
-    froz->progress_is_resumed = 1;
-    if (!scheme_setjmp(froz->progress_base)) {
-#ifdef MZ_PRECISE_GC
-      froz->fixup_var_stack_chain = &__gc_var_stack__;
-#endif
-      scheme_longjmpup(&froz->progress_cont->buf);
-    }
-  } else {
-    scheme_end_atomic();
-  }
-
-  scheme_pop_continuation_frame(&cframe);
-
-  froz->old_param = NULL;
-  froz->progress_cont = NULL;
-  froz->do_data = NULL;
-
-  return froz->val;
-}
-
-static void suspend_froz_progress(void)
-{
-  FrozenTramp * volatile froz;
-  double msecs;
-  Scheme_Object *v;
-
-  v = scheme_extract_one_cc_mark(NULL, froz_key);
-  froz = (FrozenTramp *)SCHEME_CAR(v);
-  v = NULL;
-
-  msecs = scheme_get_inexact_milliseconds();
-  if (msecs < froz->continue_until)
-    return;
-
-  scheme_on_atomic_timeout = NULL;
-
-  froz->in_progress = 1;
-  if (scheme_setjmpup(&froz->progress_cont->buf, (void*)froz->progress_cont, froz->progress_base_addr)) {
-    /* we're back */
-    scheme_reset_jmpup_buf(&froz->progress_cont->buf);
-#ifdef MZ_PRECISE_GC
-    /* Base addr points to the last valid gc_var_stack address.
-       Fixup that link to skip over the part of the stack we're
-       not using right now. */
-    ((void **)froz->progress_base_addr)[0] = froz->fixup_var_stack_chain;
-    ((void **)froz->progress_base_addr)[1] = NULL;
-#endif
-  } else {
-    /* we're leaving */
-    scheme_longjmp(froz->progress_base, 1);
-  }
-}
-
-static void froz_run_new(FrozenTramp * volatile froz, int run_msecs)
-{
-  double msecs;
-
-  /* We're willing to start new work that is specific to this thread */
-  froz->progress_is_resumed = 0;
-
-  msecs = scheme_get_inexact_milliseconds();
-  froz->continue_until = msecs + run_msecs;
-
-  if (!scheme_setjmp(froz->progress_base)) {
-    Scheme_Frozen_Stack_Proc do_f;
-    scheme_start_atomic();
-    scheme_on_atomic_timeout = suspend_froz_progress;
-    atomic_timeout_atomic_level = -1;
-    do_f = froz->do_f;
-    do_f(froz->do_data);
-  }
-
-  if (froz->progress_is_resumed) {
-    /* we've already returned once; jump out to new progress base */
-    scheme_longjmp(froz->progress_base, 1);
-  } else {
-    scheme_on_atomic_timeout = NULL;
-    scheme_end_atomic_no_swap();
-  }
-}
-
-static void froz_do_run_new(FrozenTramp * volatile froz, int *iteration, int run_msecs)
-{
-  /* This function just makes room on the stack, eventually calling
-     froz_run_new(). */
-  int new_iter[32];
-
-  if (iteration[0] == 3) {
-#ifdef MZ_PRECISE_GC
-    froz->progress_base_addr = (void *)&__gc_var_stack__;
-#else
-    froz->progress_base_addr = (void *)new_iter;
-#endif
-    froz_run_new(froz, run_msecs);
-  } else {
-    new_iter[0] = iteration[0] + 1;
-    froz_do_run_new(froz, new_iter, run_msecs);
-  }
-}
-
-int scheme_frozen_run_some(Scheme_Frozen_Stack_Proc do_f, void *do_data, int run_msecs)
-{
-  FrozenTramp * volatile froz;
-  int more = 0;
-  Scheme_Object *v;
-
-  v = scheme_extract_one_cc_mark(NULL, froz_key);
-  if (v)
-    froz = (FrozenTramp *)SCHEME_CAR(v);
-  else
-    froz = NULL;
-  v = NULL;
-
-  if (froz) {
-    if (froz->in_progress) {
-      /* We have work in progress. */
-      if ((unsigned long)froz->progress_base_addr < get_deeper_base()) {
-        /* We have stack space to resume the old work: */
-        double msecs;
-        froz->in_progress = 0;
-        froz->progress_is_resumed = 1;
-        msecs = scheme_get_inexact_milliseconds();
-        froz->continue_until = msecs + run_msecs;
-        scheme_start_atomic();
-        scheme_on_atomic_timeout = suspend_froz_progress;
-        atomic_timeout_atomic_level = -1;
-        if (!scheme_setjmp(froz->progress_base)) {
-#ifdef MZ_PRECISE_GC
-          froz->fixup_var_stack_chain = &__gc_var_stack__;
-#endif
-          scheme_longjmpup(&froz->progress_cont->buf);
-        } else {
-          scheme_on_atomic_timeout = NULL;
-          scheme_end_atomic_no_swap();
-        }
-      }
-    } else {
-      int iter[1];
-      iter[0] = 0;
-      froz->do_f = do_f;
-      froz->do_data = do_data;
-      froz_do_run_new(froz, iter, run_msecs);
-    }
-    more = froz->in_progress;
-  }
-
-  return more;
-}
-
-int scheme_is_in_frozen_stack()
-{
-  Scheme_Object *v;
-
-  v = scheme_extract_one_cc_mark(NULL, froz_key);
-  if (v)
-    return 1;
-  else
-    return 0;
-}
-
-/* Disable warning for returning address of local variable: */
-#ifdef _MSC_VER
-#pragma warning (disable:4172)
-#endif
-
-static unsigned long get_deeper_base()
-{
-  long here;
-  unsigned long here_addr = (unsigned long)&here;
-  return here_addr;
-}
-
-#ifdef _MSC_VER
-#pragma warning (default:4172)
-#endif
-
 /*========================================================================*/
 /* precise GC */
 /*========================================================================*/
@@ -8396,7 +8163,6 @@ static void register_traversers(void)
   GC_REG_TRAV(scheme_rt_evt, mark_evt);
   GC_REG_TRAV(scheme_rt_syncing, mark_syncing);
   GC_REG_TRAV(scheme_rt_parameterization, mark_parameterization);
-  GC_REG_TRAV(scheme_rt_frozen_tramp, mark_frozen_tramp);
 }
 
 END_XFORM_SKIP;