adjust futures to decouple blocked futures from worker threads

which required adding a notion of "lightweight continuation" to
the runtime system, where a lightweight continuation involves
only frames from JIT-generated code (so that details of the stack
layout are known, for example)
This commit is contained in:
Matthew Flatt 2010-10-07 06:17:05 -06:00
parent 99178c70a0
commit 15302dc844
15 changed files with 1386 additions and 315 deletions

View File

@ -114,6 +114,7 @@ We should also test deep continuations.
(with-continuation-mark
'x 1
(current-continuation-marks))))])
(sleep 0.1)
(list (continuation-mark-set->list (touch f1) 'x)
(continuation-mark-set->list (touch f2) 'x))))
@ -170,7 +171,7 @@ We should also test deep continuations.
;on a worker thread
(let ([f1 (future (λ () (current-future)))]
[f2 (future (λ () (current-future)))])
(sleep 3)
(sleep 0.1)
(check-equal? #t (equal? f1 (touch f1)))
(check-equal? #f (equal? f2 (touch f1)))
(check-equal? #t (equal? f2 (touch f2)))

View File

@ -769,6 +769,8 @@ static void *allocate_big(const size_t request_size_bytes, int type)
size_t allocate_size;
void *addr;
if (GC_gen0_alloc_only) return NULL;
#ifdef NEWGC_BTC_ACCOUNT
if(GC_out_of_memory) {
#ifdef MZ_USE_PLACES
@ -1119,7 +1121,7 @@ inline static void *allocate(const size_t request_size, const int type)
unsigned long newptr;
if(request_size == 0) return (void *) zero_sized;
allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);
if(allocate_size > MAX_OBJECT_SIZE) return allocate_big(request_size, type);
@ -1131,6 +1133,8 @@ inline static void *allocate(const size_t request_size, const int type)
if(OVERFLOWS_GEN0(newptr)) {
NewGC *gc = GC_get_GC();
if (GC_gen0_alloc_only) return NULL;
#ifdef MZ_USE_PLACES
if (postmaster_and_master_gc(gc)) { return allocate_medium(request_size, type); }
#endif

View File

@ -108,6 +108,7 @@ typedef struct Thread_Local_Variables {
struct NewGC *GC_instance_;
unsigned long GC_gen0_alloc_page_ptr_;
unsigned long GC_gen0_alloc_page_end_;
int GC_gen0_alloc_only_;
void *bignum_cache_[BIGNUM_CACHE_SIZE];
int cache_count_;
struct Scheme_Hash_Table *toplevels_ht_;
@ -206,6 +207,7 @@ typedef struct Thread_Local_Variables {
struct Scheme_Thread *scheme_main_thread_;
struct Scheme_Thread *scheme_first_thread_;
struct Scheme_Thread_Set *scheme_thread_set_top_;
struct Scheme_Current_LWC *scheme_current_lwc_;
int num_running_threads_;
int swap_no_setjmp_;
int thread_swap_count_;
@ -402,6 +404,7 @@ XFORM_GC_VARIABLE_STACK_THROUGH_THREAD_LOCAL;
#define GC_instance XOA (scheme_get_thread_local_variables()->GC_instance_)
#define GC_gen0_alloc_page_ptr XOA (scheme_get_thread_local_variables()->GC_gen0_alloc_page_ptr_)
#define GC_gen0_alloc_page_end XOA (scheme_get_thread_local_variables()->GC_gen0_alloc_page_end_)
#define GC_gen0_alloc_only XOA (scheme_get_thread_local_variables()->GC_gen0_alloc_only_)
#define GC_variable_stack XOA (scheme_get_thread_local_variables()->GC_variable_stack_)
#define bignum_cache XOA (scheme_get_thread_local_variables()->bignum_cache_)
#define cache_count XOA (scheme_get_thread_local_variables()->cache_count_)
@ -502,6 +505,7 @@ XFORM_GC_VARIABLE_STACK_THROUGH_THREAD_LOCAL;
#define scheme_main_thread XOA (scheme_get_thread_local_variables()->scheme_main_thread_)
#define scheme_first_thread XOA (scheme_get_thread_local_variables()->scheme_first_thread_)
#define scheme_thread_set_top XOA (scheme_get_thread_local_variables()->scheme_thread_set_top_)
#define scheme_current_lwc XOA (scheme_get_thread_local_variables()->scheme_current_lwc_)
#define num_running_threads XOA (scheme_get_thread_local_variables()->num_running_threads_)
#define swap_no_setjmp XOA (scheme_get_thread_local_variables()->swap_no_setjmp_)
#define thread_swap_count XOA (scheme_get_thread_local_variables()->thread_swap_count_)

View File

@ -475,8 +475,9 @@ static Scheme_Env *place_instance_init(void *stack_base, int initial_main_os_thr
scheme_init_stack_check();
scheme_init_overflow();
init_toplevel_local_offsets_hashtable_caches();
scheme_init_thread_lwc();
init_toplevel_local_offsets_hashtable_caches();
#ifdef TIME_STARTUP_PROCESS
printf("pre-process @ %ld\n", scheme_get_process_milliseconds());

View File

@ -8322,6 +8322,276 @@ static Scheme_Object *continuation_prompt_available(int argc, Scheme_Object *arg
return scheme_false;
}
/*========================================================================*/
/* lightweight continuations */
/*========================================================================*/
/* A lightweight continuation is one that contains only frames from
   JIT-generated code. The code here manages capture and restore for
   the runstack and mark stack, while the rest is in the JIT.
   A captured lightweight continuation holds copies of the three
   relevant stack fragments plus the register state at capture time. */
struct Scheme_Lightweight_Continuation {
  MZTAG_IF_REQUIRED /* scheme_rt_lightweight_cont */
  Scheme_Current_LWC *saved_lwc;           /* start/end stack markers recorded at capture */
  void *stack_slice;                       /* copy of the C-stack fragment (JIT frames only) */
  Scheme_Object **runstack_slice;          /* copy of the Scheme runstack fragment */
  Scheme_Cont_Mark *cont_mark_stack_slice; /* copy of the continuation-mark fragment */
};
/* Allocates the per-thread Scheme_Current_LWC record. Uses raw malloc
   rather than GC allocation because the record is read and written from
   future threads, where GC interaction is disallowed.
   NOTE(review): the malloc result is not checked for NULL — presumably
   startup/worker-spawn allocation is assumed not to fail; confirm. */
void scheme_init_thread_lwc(void) XFORM_SKIP_PROC
{
  scheme_current_lwc = (Scheme_Current_LWC *)malloc(sizeof(Scheme_Current_LWC));
}
/* Records the current runstack pointer, continuation-mark stack depth,
   and mark position as the *start* boundary of a prospective lightweight
   continuation. Called before entering JIT-generated code. */
void scheme_fill_lwc_start(void) XFORM_SKIP_PROC
{
  scheme_current_lwc->runstack_start = MZ_RUNSTACK;
  scheme_current_lwc->cont_mark_stack_start = MZ_CONT_MARK_STACK;
  scheme_current_lwc->cont_mark_pos_start = MZ_CONT_MARK_POS;
}
/* Records the current registers as the *end* boundary of the lightweight
   continuation, then delegates to the JIT layer for the C-stack part.
   NOTE(review): scheme_fill_stack_lwc_end is defined in the JIT layer;
   presumably it records the C-stack extent — confirm against the JIT. */
void scheme_fill_lwc_end(void) XFORM_SKIP_PROC
{
  scheme_current_lwc->runstack_end = MZ_RUNSTACK;
  scheme_current_lwc->cont_mark_stack_end = MZ_CONT_MARK_STACK;
  scheme_current_lwc->cont_mark_pos_end = MZ_CONT_MARK_POS;
  scheme_fill_stack_lwc_end();
}
/* Invalidates the current lightweight-continuation region.
   Intentionally a no-op at present: the region is simply overwritten by
   the next scheme_fill_lwc_start()/scheme_fill_lwc_end() pair. */
void scheme_clear_lwc(void) XFORM_SKIP_PROC
{
}
/* Captures the lightweight continuation described by `p_lwc' (belonging
   to thread `p') into a freshly allocated Scheme_Lightweight_Continuation.
   Copies the C-stack slice, the runstack slice, and the continuation-mark
   slice. Returns NULL if any allocation fails (so a future thread can
   bail out and ask the runtime thread to capture instead). */
Scheme_Lightweight_Continuation *scheme_capture_lightweight_continuation(Scheme_Thread *p,
                                                                         Scheme_Current_LWC *p_lwc,
                                                                         void **storage)
  XFORM_SKIP_PROC
/* This function explicitly cooperates with the GC by storing the
   pointers it needs to save across a collection in `storage'. Also,
   if allocation fails, it can abort and return NULL. The combination
   allows it to work in a thread for running futures (where allocation
   and GC in general are disallowed). */
{
  long len, i, j, pos;
  Scheme_Object **runstack_slice;
  Scheme_Cont_Mark *cont_mark_stack_slice;
  Scheme_Current_LWC *lwc;
  Scheme_Cont_Mark *seg;
  Scheme_Lightweight_Continuation *lw;
  void *stack;

#ifndef MZ_PRECISE_GC
  /* Lightweight continuations are supported only with the precise GC: */
  return NULL;
#endif

  /* Register GC-movable pointers in `storage' and re-fetch them after
     every possible collection point below: */
  storage[1] = p;

  lw = MALLOC_ONE_RT(Scheme_Lightweight_Continuation);
  if (!lw) return NULL;

#ifdef MZTAG_REQUIRED
  lw->type = scheme_rt_lightweight_cont;
#endif
  storage[0] = lw;

  /* The LWC record itself is atomic memory (no traced pointers): */
  lwc = (Scheme_Current_LWC *)scheme_malloc_atomic(sizeof(Scheme_Current_LWC));
  if (!lwc) return NULL;

  memcpy(lwc, p_lwc, sizeof(Scheme_Current_LWC));

  lw = (Scheme_Lightweight_Continuation *)storage[0];
  lw->saved_lwc = lwc;

  /* Copy the C-stack fragment (JIT frames only): */
  stack = scheme_save_lightweight_continuation_stack(p_lwc);
  if (!stack) return NULL;

  lw = (Scheme_Lightweight_Continuation *)storage[0];
  lw->stack_slice = stack;

  /* Copy the runstack fragment; the runstack grows downward, so
     `start' is the high address and `end' the low one: */
  len = lwc->runstack_start - lwc->runstack_end;
  runstack_slice = MALLOC_N(Scheme_Object*, len);
  if (!runstack_slice) return NULL;

  lw = (Scheme_Lightweight_Continuation *)storage[0];
  lw->runstack_slice = runstack_slice;
  memcpy(runstack_slice, lw->saved_lwc->runstack_end, len * sizeof(Scheme_Object *));

  /* The runstack may contain pointers to itself, but they are just
     cleared slots where a register containing the runstack pointer
     was handy; zero out such slots to avoid retaining a runstack
     unnecessarily: */
  for (i = 0; i < len; i++) {
    if (((unsigned long)runstack_slice[i] >= (unsigned long)lwc->runstack_end)
        && ((unsigned long)runstack_slice[i] <= (unsigned long)lwc->runstack_start))
      runstack_slice[i] = 0;
  }

  /* Copy the continuation-mark fragment: */
  len = lwc->cont_mark_stack_end - lwc->cont_mark_stack_start;

  if (len) {
    cont_mark_stack_slice = MALLOC_N(Scheme_Cont_Mark, len);
    if (!cont_mark_stack_slice) return NULL;
    lw = (Scheme_Lightweight_Continuation *)storage[0];
  } else
    cont_mark_stack_slice = NULL;

  lw->cont_mark_stack_slice = cont_mark_stack_slice;
  lwc = lw->saved_lwc;
  p = (Scheme_Thread *)storage[1];

  for (j = 0; j < len; j++) {
    i = j + lwc->cont_mark_stack_start;
    seg = p->cont_mark_stack_segments[i >> SCHEME_LOG_MARK_SEGMENT_SIZE];
    pos = i & SCHEME_MARK_SEGMENT_MASK;
    /* BUG FIX: the destination index must be the slice-relative `j',
       not the absolute mark-stack index `i'; using `i' writes past the
       end of `cont_mark_stack_slice' whenever cont_mark_stack_start > 0. */
    memcpy(cont_mark_stack_slice + j, seg + pos, sizeof(Scheme_Cont_Mark));
  }

  return lw;
}
/* If `arg' points at the live end of the runstack region captured in
   `lw', redirect it to the saved runstack copy; otherwise return it
   unchanged. Used to fix up runstack-pointer arguments after a
   lightweight-continuation capture. */
Scheme_Object **scheme_adjust_runstack_argument(Scheme_Lightweight_Continuation *lw,
                                                Scheme_Object **arg)
  XFORM_SKIP_PROC
{
  return (arg == lw->saved_lwc->runstack_end) ? lw->runstack_slice : arg;
}
/* Trampoline used when the runstack must first be enlarged: unpacks the
   continuation and result that scheme_apply_lightweight_continuation
   stashed in the thread record, then retries the application. */
static void *apply_lwc_k()
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Lightweight_Continuation *lw = (Scheme_Lightweight_Continuation *)p->ku.k.p1;
  Scheme_Object *result = (Scheme_Object *)p->ku.k.p2;

  /* Clear the stash so the GC does not retain these longer than needed: */
  p->ku.k.p1 = NULL;
  p->ku.k.p2 = NULL;

  return scheme_apply_lightweight_continuation(lw, result);
}
/* Reinstates captured lightweight continuation `lw' in the current
   context, delivering `result' to it: splices the saved runstack and
   continuation marks into the current stacks (with positions relocated),
   then jumps into the saved C-stack slice. May run in a future thread. */
Scheme_Object *scheme_apply_lightweight_continuation(Scheme_Lightweight_Continuation *lw,
                                                     Scheme_Object *result) XFORM_SKIP_PROC
{
  long len, cm_len, cm_pos_delta, cm_delta, i, cm;
  Scheme_Cont_Mark *seg;
  Scheme_Object **rs;

  len = lw->saved_lwc->runstack_start - lw->saved_lwc->runstack_end;

  if (!scheme_check_runstack(len)) {
    /* This will not happen when restoring a future-thread-captured
       continuation in a future thread. */
    /* Not enough runstack room: stash arguments in the thread record
       and grow the runstack via the apply_lwc_k trampoline. */
    scheme_current_thread->ku.k.p1 = lw;
    scheme_current_thread->ku.k.p2 = result;
    return (Scheme_Object *)scheme_enlarge_runstack(len, apply_lwc_k);
  }

  /* FIXME: check whether the C stack is big enough */

  /* application of a lightweight continuation forms a lightweight continuation: */
  scheme_current_lwc->runstack_start = MZ_RUNSTACK;
  scheme_current_lwc->cont_mark_stack_start = MZ_CONT_MARK_STACK;
  /* NOTE(review): the +2 presumably leaves room for a mark frame at the
     boundary, matching the delta used below — confirm against the JIT. */
  scheme_current_lwc->cont_mark_pos_start = MZ_CONT_MARK_POS + 2;

  cm_len = lw->saved_lwc->cont_mark_stack_end - lw->saved_lwc->cont_mark_stack_start;
  if (cm_len) {
    /* install captured continuation marks, adjusting the pos
       to match the new context: */
    seg = lw->cont_mark_stack_slice;
    cm_pos_delta = MZ_CONT_MARK_POS + 2 - lw->saved_lwc->cont_mark_pos_start;
    for (i = 0; i < cm_len; i++) {
      MZ_CONT_MARK_POS = seg[i].pos + cm_pos_delta;
      scheme_set_cont_mark(seg[i].key, seg[i].val);
    }
    MZ_CONT_MARK_POS = lw->saved_lwc->cont_mark_pos_end + cm_pos_delta;
  }

  /* Delta for relocating saved mark-stack positions found on the runstack: */
  cm_delta = (long)MZ_CONT_MARK_STACK - (long)lw->saved_lwc->cont_mark_stack_end;

  /* Splice the saved runstack slice below the current runstack pointer: */
  rs = MZ_RUNSTACK - len;
  MZ_RUNSTACK = rs;
  memcpy(rs, lw->runstack_slice, len * sizeof(Scheme_Object*));

  /* If SCHEME_EVAL_WAITING appears in the runstack slice, it
     indicates that a cm position follows: */
  for (i = 0; i < len; i++) {
    if (rs[i] == SCHEME_EVAL_WAITING) {
      /* relocate that mark-stack position into the new context */
      cm = SCHEME_INT_VAL(rs[i+1]);
      cm += cm_delta;
      rs[i+1] = scheme_make_integer(cm);
    }
  }

  /* Finally, restore the saved C-stack slice and resume it: */
  return scheme_apply_lightweight_continuation_stack(lw->saved_lwc, lw->stack_slice, result);
}
/* Pushes the continuation marks recorded in suspended lightweight
   continuation `lw' onto the current mark stack, inside a fresh
   continuation frame tracked by `d'. Each mark's position is shifted
   into the current context. Returns 1 if a frame was pushed (the caller
   must later pop it via `d'), 0 if there were no marks to push.
   Cleanup: removed an unused `Scheme_Thread *p' local (assigned from
   scheme_current_thread but never read). */
int scheme_push_marks_from_lightweight_continuation(Scheme_Lightweight_Continuation *lw,
                                                    Scheme_Cont_Frame_Data *d)
{
  long pos, len, delta;
  Scheme_Cont_Mark *seg;

  len = (lw->saved_lwc->cont_mark_stack_end
         - lw->saved_lwc->cont_mark_stack_start);

  if (len) {
    scheme_push_continuation_frame(d);

    seg = lw->cont_mark_stack_slice;
    /* Shift saved positions so they land above the current mark pos: */
    delta = MZ_CONT_MARK_POS + 2 - lw->saved_lwc->cont_mark_pos_start;

    for (pos = 0; pos < len; pos++) {
      MZ_CONT_MARK_POS = seg[pos].pos + delta;
      scheme_set_cont_mark(seg[pos].key, seg[pos].val);
    }

    MZ_CONT_MARK_POS = lw->saved_lwc->cont_mark_pos_end + delta;

    return 1;
  }

  return 0;
}
/* Pushes all continuation marks currently recorded in thread `p2' onto
   the current thread's mark stack, inside a fresh continuation frame
   tracked by `d', with positions shifted (never downward) into the
   current context. Returns 1 if a frame was pushed (caller must pop it
   via `d'), 0 if `p2' has no marks.
   Cleanup: removed an unused `Scheme_Thread *p' local (assigned from
   scheme_current_thread but never read). */
int scheme_push_marks_from_thread(Scheme_Thread *p2, Scheme_Cont_Frame_Data *d)
{
  long i, pos, delta;
  Scheme_Cont_Mark *seg;

  if (p2->cont_mark_stack) {
    scheme_push_continuation_frame(d);

    delta = MZ_CONT_MARK_POS - p2->cont_mark_pos;
    if (delta < 0) delta = 0;

    for (i = 0; i < p2->cont_mark_stack; i++) {
      seg = p2->cont_mark_stack_segments[i >> SCHEME_LOG_MARK_SEGMENT_SIZE];
      pos = i & SCHEME_MARK_SEGMENT_MASK;
      MZ_CONT_MARK_POS = seg[pos].pos + delta;
      scheme_set_cont_mark(seg[pos].key, seg[pos].val);
    }

    MZ_CONT_MARK_POS = p2->cont_mark_pos + delta;

    return 1;
  }

  return 0;
}
/*========================================================================*/
/* dynamic-wind */
/*========================================================================*/
@ -9494,6 +9764,7 @@ static void register_traversers(void)
GC_REG_TRAV(scheme_rt_dyn_wind_cell, mark_dyn_wind_cell);
GC_REG_TRAV(scheme_rt_dyn_wind_info, mark_dyn_wind_info);
GC_REG_TRAV(scheme_cont_mark_chain_type, mark_cont_mark_chain);
GC_REG_TRAV(scheme_rt_lightweight_cont, mark_lightweight_cont);
}
END_XFORM_SKIP;

View File

@ -89,7 +89,7 @@ static Scheme_Object *touch(int argc, Scheme_Object *argv[])
mz_jmp_buf newbuf, * volatile savebuf;
Scheme_Thread *p = scheme_current_thread;
/* In case another Scheme thread touchs the future. */
/* In case another Scheme thread touches the future. */
sema = scheme_make_sema(0);
ft->running_sema = sema;
@ -218,6 +218,7 @@ typedef struct Scheme_Future_State {
future_t *future_queue;
future_t *future_queue_end;
future_t *future_waiting_atomic;
future_t *future_waiting_lwc;
int next_futureid;
mzrt_mutex *future_mutex;
@ -253,6 +254,8 @@ THREAD_LOCAL_DECL(void *jit_future_storage[2]);
#ifdef MZ_PRECISE_GC
THREAD_LOCAL_DECL(extern unsigned long GC_gen0_alloc_page_ptr);
THREAD_LOCAL_DECL(extern unsigned long GC_gen0_alloc_page_end);
THREAD_LOCAL_DECL(extern int GC_gen0_alloc_only);
#endif
static void start_gc_not_ok(Scheme_Future_State *fs);
@ -293,6 +296,7 @@ typedef struct future_thread_params_t {
Scheme_Object ***scheme_current_runstack_start_ptr;
Scheme_Thread **current_thread_ptr;
void *jit_future_storage_ptr;
Scheme_Current_LWC *lwc;
} future_thread_params_t;
/**********************************************************************/
@ -375,6 +379,7 @@ void futures_init(void)
REGISTER_SO(fs->future_queue);
REGISTER_SO(fs->future_queue_end);
REGISTER_SO(fs->future_waiting_atomic);
REGISTER_SO(fs->future_waiting_lwc);
REGISTER_SO(jit_future_storage);
/* Create a 'dummy' future thread state object for the runtime
@ -725,7 +730,6 @@ Scheme_Object *touch(int argc, Scheme_Object *argv[])
ft->work_completed = 1;
ft->retval = retval;
ft->status = FINISHED;
dequeue_future(fs, ft);
mzrt_mutex_unlock(fs->future_mutex);
receive_special_result(ft, retval, 0);
@ -833,7 +837,7 @@ void *worker_thread_future_loop(void *arg)
Scheme_Future_Thread_State *fts = params->fts;
Scheme_Future_State *fs = params->fs;
Scheme_Object *v;
Scheme_Object* (*jitcode)(Scheme_Object*, int, Scheme_Object**);
Scheme_Closed_Prim *jitcode;
future_t *ft;
mz_jmp_buf newbuf;
@ -843,6 +847,8 @@ void *worker_thread_future_loop(void *arg)
GC_instance = params->shared_GC;
scheme_current_thread = params->thread_skeleton;
GC_gen0_alloc_only = 1;
/* Set processor affinity */
/*mzrt_mutex_lock(fs->future_mutex);
static unsigned long cur_cpu_mask = 1;
@ -876,6 +882,9 @@ void *worker_thread_future_loop(void *arg)
params->current_thread_ptr = &scheme_current_thread;
params->jit_future_storage_ptr = &jit_future_storage[0];
scheme_init_thread_lwc();
params->lwc = scheme_current_lwc;
mzrt_sema_post(params->ready_sema);
while (1) {
@ -895,59 +904,89 @@ void *worker_thread_future_loop(void *arg)
/* Set up the JIT compiler for this thread */
scheme_jit_fill_threadlocal_table();
jitcode = (Scheme_Object* (*)(Scheme_Object*, int, Scheme_Object**))(ft->code);
fts->current_ft = ft;
/* Run the code:
The lambda passed to a future will always be a parameterless
function.
From this thread's perspective, this call will never return
until all the work to be done in the future has been completed,
including runtime calls.
If jitcode asks the runtime thread to do work, then
a GC can occur. */
LOG("Running JIT code at %p...\n", ft->code);
MZ_RUNSTACK = MZ_RUNSTACK_START + fts->runstack_size;
MZ_CONT_MARK_STACK = 0;
MZ_CONT_MARK_POS = (MZ_MARK_POS_TYPE)1;
scheme_current_thread->error_buf = &newbuf;
if (scheme_future_setjmp(newbuf)) {
/* failed */
v = NULL;
if (ft->suspended_lw) {
/* invoke a lightweight continuation */
scheme_current_thread->error_buf = &newbuf;
if (scheme_future_setjmp(newbuf)) {
/* failed or suspended */
v = NULL;
} else {
struct Scheme_Lightweight_Continuation *lw = ft->suspended_lw;
ft->suspended_lw = NULL;
v = ft->retval_s;
ft->retval_s = NULL;
receive_special_result(ft, v, 1);
v = scheme_apply_lightweight_continuation(lw, v);
if (SAME_OBJ(v, SCHEME_TAIL_CALL_WAITING)) {
v = scheme_ts_scheme_force_value_same_mark(v);
}
}
} else {
v = jitcode(ft->orig_lambda, 0, NULL);
if (SAME_OBJ(v, SCHEME_TAIL_CALL_WAITING)) {
v = scheme_ts_scheme_force_value_same_mark(v);
jitcode = ft->code;
/* Run the code:
The lambda passed to a future will always be a parameterless
function.
From this thread's perspective, this call will never return
until all the work to be done in the future has been completed,
including runtime calls.
If jitcode asks the runtime thread to do work, then
a GC can occur. */
LOG("Running JIT code at %p...\n", ft->code);
scheme_current_thread->error_buf = &newbuf;
if (scheme_future_setjmp(newbuf)) {
/* failed or suspended */
v = NULL;
} else {
scheme_fill_lwc_start();
v = scheme_call_as_lightweight_continuation(jitcode, ft->orig_lambda, 0, NULL);
if (SAME_OBJ(v, SCHEME_TAIL_CALL_WAITING)) {
v = scheme_ts_scheme_force_value_same_mark(v);
}
}
LOG("Finished running JIT code at %p.\n", ft->code);
}
LOG("Finished running JIT code at %p.\n", ft->code);
/* Get future again, since a GC may have occurred */
/* Get future again, since a GC may have occurred or
future may have been suspended */
ft = fts->current_ft;
/* Set the return val in the descriptor */
mzrt_mutex_lock(fs->future_mutex);
ft->work_completed = 1;
ft->retval = v;
/* In case of multiple values: */
send_special_result(ft, v);
/* Update the status */
ft->status = FINISHED;
dequeue_future(fs, ft);
if (!ft) {
/* continuation of future will be requeued, and this future
thread can do something else */
} else {
/* Set the return val in the descriptor */
ft->work_completed = 1;
ft->retval = v;
/* In case of multiple values: */
send_special_result(ft, v);
/* Update the status */
ft->status = FINISHED;
}
/* Clear stacks */
MZ_RUNSTACK = MZ_RUNSTACK_START + fts->runstack_size;
MZ_CONT_MARK_STACK = 0;
scheme_signal_received_at(fs->signal_handle);
if (ft)
scheme_signal_received_at(fs->signal_handle);
}
end_gc_not_ok(fts, fs, NULL);
mzrt_mutex_unlock(fs->future_mutex);
@ -956,6 +995,45 @@ void *worker_thread_future_loop(void *arg)
return NULL;
}
/* Captures the lightweight continuation of blocked future `ft' so the
   worker thread that was running it can move on to other work; on
   success, marks the future for requeueing and detaches it from its
   worker thread. Returns 1 on success, 0 if capture failed. */
static int capture_future_continuation(future_t *ft, void **storage)
  XFORM_SKIP_PROC
/* This function explicitly cooperates with the GC by storing the
   pointers it needs to save across a collection in `storage', so
   it can be used in a future thread. If future-thread-local
   allocation fails, the result is 0. */
{
  Scheme_Lightweight_Continuation *lw;
  Scheme_Object **arg_S;

  /* Protect `ft' across the (possibly collecting) capture: */
  storage[2] = ft;

  lw = scheme_capture_lightweight_continuation(ft->arg_p, ft->lwc, storage);
  if (!lw) return 0;

  /* Re-fetch `ft' in case a GC moved it during capture: */
  ft = (future_t *)storage[2];

  ft->suspended_lw = lw;
  ft->status = WAITING_FOR_REQUEUE;
  ft->want_lw = 0;
  ft->fts->current_ft = NULL; /* tells worker thread that it no longer
                                 needs to handle the future */

  /* Runstack-pointer arguments may point into the live runstack that
     was just copied; redirect them into the saved slice: */
  if (ft->arg_S0) {
    arg_S = scheme_adjust_runstack_argument(lw, ft->arg_S0);
    ft->arg_S0 = arg_S;
  }
  if (ft->arg_S1) {
    arg_S = scheme_adjust_runstack_argument(lw, ft->arg_S1);
    ft->arg_S1 = arg_S;
  }
  if (ft->arg_S2) {
    arg_S = scheme_adjust_runstack_argument(lw, ft->arg_S2);
    ft->arg_S2 = arg_S;
  }

  return 1;
}
void scheme_check_future_work()
/* Called in the runtime thread by the scheduler */
{
@ -974,7 +1052,6 @@ void scheme_check_future_work()
if (ft) {
fs->future_waiting_atomic = ft->next_waiting_atomic;
ft->next_waiting_atomic = NULL;
ft->waiting_atomic = 0;
}
mzrt_mutex_unlock(fs->future_mutex);
@ -985,6 +1062,33 @@ void scheme_check_future_work()
} else
break;
}
while (1) {
/* Try to get a future waiting to be suspended */
mzrt_mutex_lock(fs->future_mutex);
ft = fs->future_waiting_lwc;
if (ft) {
fs->future_waiting_lwc = ft->next_waiting_lwc;
ft->next_waiting_lwc = NULL;
}
mzrt_mutex_unlock(fs->future_mutex);
if (ft && ft->want_lw) {
void *storage[3];
(void)capture_future_continuation(ft, storage);
/* Signal the waiting worker thread that it
can continue doing other things: */
mzrt_mutex_lock(fs->future_mutex);
if (ft->can_continue_sema) {
mzrt_sema_post(ft->can_continue_sema);
ft->can_continue_sema = NULL;
}
mzrt_mutex_unlock(fs->future_mutex);
} else
break;
}
}
static void future_do_runtimecall(Scheme_Future_Thread_State *fts,
@ -995,10 +1099,18 @@ static void future_do_runtimecall(Scheme_Future_Thread_State *fts,
{
future_t *future;
Scheme_Future_State *fs = scheme_future_state;
void *storage[3];
/* Fetch the future descriptor for this thread */
future = fts->current_ft;
if (!is_atomic) {
scheme_fill_lwc_end();
future->lwc = scheme_current_lwc;
future->fts = fts;
} else
future->lwc = NULL;
/* Set up the arguments for the runtime call
to be picked up by the main rt thread */
mzrt_mutex_lock(fs->future_mutex);
@ -1006,37 +1118,52 @@ static void future_do_runtimecall(Scheme_Future_Thread_State *fts,
future->prim_func = func;
future->rt_prim = 1;
future->rt_prim_is_atomic = is_atomic;
future->arg_p = scheme_current_thread;
if (is_atomic) {
if (!future->waiting_atomic) {
future->next_waiting_atomic = fs->future_waiting_atomic;
fs->future_waiting_atomic = future;
future->waiting_atomic = 1;
}
future->next_waiting_atomic = fs->future_waiting_atomic;
fs->future_waiting_atomic = future;
future->status = WAITING_FOR_PRIM;
} else if (GC_gen0_alloc_page_ptr
&& capture_future_continuation(future, storage)) {
/* this future thread will suspend handling the future
continuation until the result of the blocking call is ready;
fts->current_ft was set to NULL */
} else {
/* couldn't capture the continuation locally, so ask
the runtime thread to capture it: */
future->next_waiting_lwc = fs->future_waiting_lwc;
fs->future_waiting_lwc = future;
future->want_lw = 1;
future->status = WAITING_FOR_PRIM;
}
/* Update the future's status to waiting */
future->status = WAITING_FOR_PRIM;
scheme_signal_received_at(fs->signal_handle);
future->arg_p = scheme_current_thread;
if (fts->current_ft) {
/* Wait for the signal that the RT call is finished
or a lightweight continuation has been captured: */
future->can_continue_sema = fts->worker_can_continue_sema;
end_gc_not_ok(fts, fs, MZ_RUNSTACK); /* we rely on this putting MZ_CONT_MARK_STACK into the thread record */
mzrt_mutex_unlock(fs->future_mutex);
/* Wait for the signal that the RT call is finished */
future->can_continue_sema = fts->worker_can_continue_sema;
end_gc_not_ok(fts, fs, MZ_RUNSTACK); /* we rely on this putting MZ_CONT_MARK_STACK into the thread record */
mzrt_sema_wait(fts->worker_can_continue_sema);
mzrt_mutex_lock(fs->future_mutex);
start_gc_not_ok(fs);
}
mzrt_mutex_unlock(fs->future_mutex);
mzrt_sema_wait(fts->worker_can_continue_sema);
mzrt_mutex_lock(fs->future_mutex);
start_gc_not_ok(fs);
mzrt_mutex_unlock(fs->future_mutex);
/* Fetch the future instance again, in case the GC has moved the pointer */
/* Fetch the future instance again, in case the GC has moved the pointer
or the future has been requeued. */
future = fts->current_ft;
if (future->no_retval) {
if (!future) {
/* future continuation was requeued */
scheme_future_longjmp(*scheme_current_thread->error_buf, 1);
} else if (future->no_retval) {
/* there was an error => abort the future */
future->no_retval = 0;
scheme_future_longjmp(*scheme_current_thread->error_buf, 1);
}
@ -1119,6 +1246,8 @@ unsigned long scheme_rtcall_alloc(const char *who, int src_type)
}
}
GC_gen0_alloc_page_end = retval + fts->gen0_size;
return retval;
}
@ -1144,32 +1273,10 @@ void scheme_rtcall_new_mark_segment(Scheme_Thread *p)
static int push_marks(future_t *f, Scheme_Cont_Frame_Data *d)
{
Scheme_Thread *p2, *p;
long i, pos, delta;
Scheme_Cont_Mark *seg;
if (f->arg_p) {
p2 = f->arg_p;
if (p2->cont_mark_stack) {
scheme_push_continuation_frame(d);
p = scheme_current_thread;
delta = MZ_CONT_MARK_POS - p2->cont_mark_pos;
if (delta < 0) delta = 0;
for (i = p2->cont_mark_stack; i--; ) {
seg = p2->cont_mark_stack_segments[i >> SCHEME_LOG_MARK_SEGMENT_SIZE];
pos = i & SCHEME_MARK_SEGMENT_MASK;
MZ_CONT_MARK_POS = seg[pos].pos + delta;
scheme_set_cont_mark(seg[pos].key, seg[pos].val);
}
MZ_CONT_MARK_POS = p2->cont_mark_pos + delta;
return 1;
}
if (f->suspended_lw) {
return scheme_push_marks_from_lightweight_continuation(f->suspended_lw, d);
} else if (f->arg_p) {
return scheme_push_marks_from_thread(f->arg_p, d);
}
return 0;
@ -1243,7 +1350,9 @@ static void do_invoke_rtcall(Scheme_Future_State *fs, future_t *future)
#endif
future->rt_prim = 0;
future->want_lw = 0; /* in case we got to the call before we got around
to capturing an LWC */
if (scheme_log_level_p(scheme_main_logger, SCHEME_LOG_DEBUG)) {
const char *src;
@ -1269,7 +1378,8 @@ static void do_invoke_rtcall(Scheme_Future_State *fs, future_t *future)
}
if ((future->source_type == FSRC_RATOR)
|| (future->source_type == FSRC_MARKS))
|| (future->source_type == FSRC_MARKS)
|| (future->source_type == FSRC_PRIM))
need_pop = push_marks(future, &mark_d);
else
need_pop = 0;
@ -1280,8 +1390,11 @@ static void do_invoke_rtcall(Scheme_Future_State *fs, future_t *future)
case SIG_VOID_VOID_3ARGS:
{
prim_void_void_3args_t func = (prim_void_void_3args_t)future->prim_func;
GC_CAN_IGNORE Scheme_Object **arg_S0 = future->arg_S0;
func(future->arg_S0);
future->arg_S0 = NULL;
func(arg_S0);
break;
}
@ -1297,12 +1410,13 @@ static void do_invoke_rtcall(Scheme_Future_State *fs, future_t *future)
#endif
case SIG_ALLOC_MARK_SEGMENT:
{
Scheme_Thread *p_seg;
GC_CAN_IGNORE Scheme_Thread *p_seg;
p_seg = (Scheme_Thread *)future->arg_s0;
future->arg_s0 = NULL;
scheme_new_mark_segment(p_seg);
break;
}
# define LOCALIZE(t, f) GC_CAN_IGNORE t f = future->f
# include "jit_ts_runtime_glue.c"
default:
scheme_signal_error("unknown protocol %d", future->prim_protocol);
@ -1313,11 +1427,19 @@ static void do_invoke_rtcall(Scheme_Future_State *fs, future_t *future)
pop_marks(&mark_d);
mzrt_mutex_lock(fs->future_mutex);
/* Signal the waiting worker thread that it
can continue running machine code */
if (future->can_continue_sema) {
mzrt_sema_post(future->can_continue_sema);
future->can_continue_sema= NULL;
if (future->suspended_lw) {
/* Re-enqueue the future so that some future thread can continue */
future->status = PENDING;
enqueue_future(fs, future);
/* Signal that a future is pending */
mzrt_sema_post(fs->future_pending_sema);
} else {
/* Signal the waiting worker thread that it
can continue running machine code */
if (future->can_continue_sema) {
mzrt_sema_post(future->can_continue_sema);
future->can_continue_sema = NULL;
}
}
mzrt_mutex_unlock(fs->future_mutex);
}
@ -1346,12 +1468,19 @@ static void invoke_rtcall(Scheme_Future_State * volatile fs, future_t * volatile
if (scheme_setjmp(newbuf)) {
mzrt_mutex_lock(fs->future_mutex);
future->no_retval = 1;
/* Signal the waiting worker thread that it
can continue running machine code */
mzrt_sema_post(future->can_continue_sema);
future->can_continue_sema = NULL;
mzrt_mutex_unlock(fs->future_mutex);
scheme_longjmp(*savebuf, 1);
if (future->suspended_lw) {
/* Abandon the future */
future->status = FINISHED;
future->work_completed = 1;
future->retval = 0;
} else {
/* Signal the waiting worker thread that it
can continue running machine code */
mzrt_sema_post(future->can_continue_sema);
future->can_continue_sema = NULL;
mzrt_mutex_unlock(fs->future_mutex);
scheme_longjmp(*savebuf, 1);
}
} else {
if (future->rt_prim_is_atomic) {
do_invoke_rtcall(fs, future);
@ -1392,12 +1521,11 @@ future_t *get_pending_future(Scheme_Future_State *fs)
{
future_t *f;
for (f = fs->future_queue; f != NULL; f = f->next) {
if (f->status == PENDING)
return f;
}
f = fs->future_queue;
if (f)
dequeue_future(fs, f);
return NULL;
return f;
}
#endif

View File

@ -35,6 +35,7 @@ typedef void* (*prim_pvoid_pvoid_pvoid_t)(void*, void*);
#define WAITING_FOR_PRIM 2
#define FINISHED 3
#define PENDING_OVERSIZE 4
#define WAITING_FOR_REQUEUE 5
#define FSRC_OTHER 0
#define FSRC_RATOR 1
@ -55,6 +56,7 @@ typedef struct future_t {
/* Runtime call stuff */
int rt_prim; /* flag to indicate waiting for a prim call */
int want_lw; /* flag to indicate waiting for lw capture */
int rt_prim_is_atomic;
double time_of_request;
const char *source_of_request;
@ -81,7 +83,12 @@ typedef struct future_t {
Scheme_Object *arg_s2;
Scheme_Object **arg_S2;
int arg_i2;
Scheme_Thread *arg_p;
struct Scheme_Current_LWC *lwc;
struct Scheme_Future_Thread_State *fts;
struct Scheme_Lightweight_Continuation *suspended_lw;
Scheme_Object *retval_s;
void *retval_p; /* use only with conservative GC */
@ -99,8 +106,8 @@ typedef struct future_t {
struct future_t *prev;
struct future_t *next;
int waiting_atomic;
struct future_t *next_waiting_atomic;
struct future_t *next_waiting_lwc;
} future_t;
/* Primitive instrumentation stuff */

View File

@ -16,6 +16,21 @@
[(#\v) "void"]
[else (error 'char->type "unknown: ~e" c)]))
(define (is-pointer-type? c)
(case c
[(#\s) #t]
[(#\t) #t]
[(#\S) #t]
[(#\b) #t]
[(#\n) #t]
[(#\m) #f]
[(#\p) #t]
[(#\i) #f]
[(#\l) #f]
[(#\z) #f]
[(#\v) #f]
[else (error 'char->type "unknown: ~e" c)]))
(define (type->arg-string t)
(let* ([t (symbol->string t)])
(substring t 0 (- (string-length t) 2))))
@ -105,13 +120,24 @@
case SIG_@|ts|:
{
prim_@|ts| f = (prim_@|ts|)future->prim_func;
@(if (string=? result-type "void") "" @string-append{@|result-type| retval;})
@(if (string=? result-type "void") "" @string-append{GC_CAN_IGNORE @|result-type| retval;})
@(if (equal? arg-types '("Scheme_Object*")) @string-append{receive_special_result(future, future->arg_s0, 1);} "")
@(string-join
(for/list ([t (in-string (type->arg-string t))]
[i (in-naturals)])
@string-append{LOCALIZE(@(char->type t), arg_@|(string t)|@|(number->string i)|);})
" ")
@(string-join
(for/list ([t (in-string (type->arg-string t))]
[i (in-naturals)]
#:when (is-pointer-type? t))
@string-append{future->arg_@|(string t)|@|(number->string i)| = NULL;})
" ")
@(if (string=? result-type "void") "" "retval = ")
f(@(string-join
(for/list ([t (in-string (type->arg-string t))]
[i (in-naturals)])
@string-append{future->arg_@|(string t)|@|(number->string i)|})
@string-append{arg_@|(string t)|@|(number->string i)|})
", "));
@(if (string=? result-type "void") "" @string-append{future->retval_@(substring ts (sub1 (string-length ts))) = retval;})
@(if (string=? result-type "Scheme_Object*") @string-append{send_special_result(future, retval);} "")

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,12 @@
case SIG_siS_s:
{
prim_siS_s f = (prim_siS_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(int, arg_i1); LOCALIZE(Scheme_Object**, arg_S2);
future->arg_s0 = NULL; future->arg_S2 = NULL;
retval =
f(future->arg_s0, future->arg_i1, future->arg_S2);
f(arg_s0, arg_i1, arg_S2);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -12,10 +14,12 @@ case SIG_siS_s:
case SIG_iSs_s:
{
prim_iSs_s f = (prim_iSs_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(int, arg_i0); LOCALIZE(Scheme_Object**, arg_S1); LOCALIZE(Scheme_Object*, arg_s2);
future->arg_S1 = NULL; future->arg_s2 = NULL;
retval =
f(future->arg_i0, future->arg_S1, future->arg_s2);
f(arg_i0, arg_S1, arg_s2);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -23,10 +27,12 @@ case SIG_iSs_s:
case SIG_s_s:
{
prim_s_s f = (prim_s_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
receive_special_result(future, future->arg_s0, 1);
LOCALIZE(Scheme_Object*, arg_s0);
future->arg_s0 = NULL;
retval =
f(future->arg_s0);
f(arg_s0);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -34,10 +40,12 @@ case SIG_s_s:
case SIG_n_s:
{
prim_n_s f = (prim_n_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Native_Closure_Data*, arg_n0);
future->arg_n0 = NULL;
retval =
f(future->arg_n0);
f(arg_n0);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -45,7 +53,9 @@ case SIG_n_s:
case SIG__s:
{
prim__s f = (prim__s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
retval =
f();
@ -56,10 +66,12 @@ case SIG__s:
case SIG_ss_s:
{
prim_ss_s f = (prim_ss_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(Scheme_Object*, arg_s1);
future->arg_s0 = NULL; future->arg_s1 = NULL;
retval =
f(future->arg_s0, future->arg_s1);
f(arg_s0, arg_s1);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -67,10 +79,12 @@ case SIG_ss_s:
case SIG_tt_s:
{
prim_tt_s f = (prim_tt_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(const Scheme_Object*, arg_t0); LOCALIZE(const Scheme_Object*, arg_t1);
future->arg_t0 = NULL; future->arg_t1 = NULL;
retval =
f(future->arg_t0, future->arg_t1);
f(arg_t0, arg_t1);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -78,10 +92,12 @@ case SIG_tt_s:
case SIG_ss_m:
{
prim_ss_m f = (prim_ss_m)future->prim_func;
MZ_MARK_STACK_TYPE retval;
GC_CAN_IGNORE MZ_MARK_STACK_TYPE retval;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(Scheme_Object*, arg_s1);
future->arg_s0 = NULL; future->arg_s1 = NULL;
retval =
f(future->arg_s0, future->arg_s1);
f(arg_s0, arg_s1);
future->retval_m = retval;
break;
@ -89,10 +105,12 @@ case SIG_ss_m:
case SIG_Sl_s:
{
prim_Sl_s f = (prim_Sl_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Object**, arg_S0); LOCALIZE(long, arg_l1);
future->arg_S0 = NULL;
retval =
f(future->arg_S0, future->arg_l1);
f(arg_S0, arg_l1);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -100,10 +118,12 @@ case SIG_Sl_s:
case SIG_l_s:
{
prim_l_s f = (prim_l_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(long, arg_l0);
retval =
f(future->arg_l0);
f(arg_l0);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -113,8 +133,10 @@ case SIG_bsi_v:
prim_bsi_v f = (prim_bsi_v)future->prim_func;
LOCALIZE(Scheme_Bucket*, arg_b0); LOCALIZE(Scheme_Object*, arg_s1); LOCALIZE(int, arg_i2);
future->arg_b0 = NULL; future->arg_s1 = NULL;
f(future->arg_b0, future->arg_s1, future->arg_i2);
f(arg_b0, arg_s1, arg_i2);
break;
@ -124,8 +146,10 @@ case SIG_iiS_v:
prim_iiS_v f = (prim_iiS_v)future->prim_func;
LOCALIZE(int, arg_i0); LOCALIZE(int, arg_i1); LOCALIZE(Scheme_Object**, arg_S2);
future->arg_S2 = NULL;
f(future->arg_i0, future->arg_i1, future->arg_S2);
f(arg_i0, arg_i1, arg_S2);
break;
@ -135,8 +159,10 @@ case SIG_ss_v:
prim_ss_v f = (prim_ss_v)future->prim_func;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(Scheme_Object*, arg_s1);
future->arg_s0 = NULL; future->arg_s1 = NULL;
f(future->arg_s0, future->arg_s1);
f(arg_s0, arg_s1);
break;
@ -146,8 +172,10 @@ case SIG_b_v:
prim_b_v f = (prim_b_v)future->prim_func;
LOCALIZE(Scheme_Bucket*, arg_b0);
future->arg_b0 = NULL;
f(future->arg_b0);
f(arg_b0);
break;
@ -155,10 +183,12 @@ case SIG_b_v:
case SIG_sl_s:
{
prim_sl_s f = (prim_sl_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(long, arg_l1);
future->arg_s0 = NULL;
retval =
f(future->arg_s0, future->arg_l1);
f(arg_s0, arg_l1);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -166,10 +196,12 @@ case SIG_sl_s:
case SIG_iS_s:
{
prim_iS_s f = (prim_iS_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(int, arg_i0); LOCALIZE(Scheme_Object**, arg_S1);
future->arg_S1 = NULL;
retval =
f(future->arg_i0, future->arg_S1);
f(arg_i0, arg_S1);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -177,10 +209,12 @@ case SIG_iS_s:
case SIG_S_s:
{
prim_S_s f = (prim_S_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Object**, arg_S0);
future->arg_S0 = NULL;
retval =
f(future->arg_S0);
f(arg_S0);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -190,8 +224,10 @@ case SIG_s_v:
prim_s_v f = (prim_s_v)future->prim_func;
receive_special_result(future, future->arg_s0, 1);
LOCALIZE(Scheme_Object*, arg_s0);
future->arg_s0 = NULL;
f(future->arg_s0);
f(arg_s0);
break;
@ -199,10 +235,12 @@ case SIG_s_v:
case SIG_iSi_s:
{
prim_iSi_s f = (prim_iSi_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(int, arg_i0); LOCALIZE(Scheme_Object**, arg_S1); LOCALIZE(int, arg_i2);
future->arg_S1 = NULL;
retval =
f(future->arg_i0, future->arg_S1, future->arg_i2);
f(arg_i0, arg_S1, arg_i2);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -212,8 +250,10 @@ case SIG_siS_v:
prim_siS_v f = (prim_siS_v)future->prim_func;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(int, arg_i1); LOCALIZE(Scheme_Object**, arg_S2);
future->arg_s0 = NULL; future->arg_S2 = NULL;
f(future->arg_s0, future->arg_i1, future->arg_S2);
f(arg_s0, arg_i1, arg_S2);
break;
@ -221,10 +261,12 @@ case SIG_siS_v:
case SIG_z_p:
{
prim_z_p f = (prim_z_p)future->prim_func;
void* retval;
GC_CAN_IGNORE void* retval;
LOCALIZE(size_t, arg_z0);
retval =
f(future->arg_z0);
f(arg_z0);
future->retval_p = retval;
break;
@ -232,10 +274,12 @@ case SIG_z_p:
case SIG_si_s:
{
prim_si_s f = (prim_si_s)future->prim_func;
Scheme_Object* retval;
GC_CAN_IGNORE Scheme_Object* retval;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(int, arg_i1);
future->arg_s0 = NULL;
retval =
f(future->arg_s0, future->arg_i1);
f(arg_s0, arg_i1);
future->retval_s = retval;
send_special_result(future, retval);
break;
@ -245,8 +289,10 @@ case SIG_sis_v:
prim_sis_v f = (prim_sis_v)future->prim_func;
LOCALIZE(Scheme_Object*, arg_s0); LOCALIZE(int, arg_i1); LOCALIZE(Scheme_Object*, arg_s2);
future->arg_s0 = NULL; future->arg_s2 = NULL;
f(future->arg_s0, future->arg_i1, future->arg_s2);
f(arg_s0, arg_i1, arg_s2);
break;

View File

@ -62,6 +62,9 @@ struct jit_local_state {
#endif
int r0_can_be_tmp;
int argssize;
#ifdef JIT_X86_64
int argpushes;
#endif
};
/* 3-parameter operation */
@ -347,6 +350,19 @@ struct jit_local_state {
#define jit_pushr_l(rs) jit_pushr_i(rs)
#define jit_popr_l(rs) jit_popr_i(rs)
/* For getting certain arguments (e.g., after pointer, int, and pointer)
before we set up the local frame: */
#define JIT_PREARG JIT_R0
#ifdef JIT_X86_64
# define jit_getprearg__p(r) (MOVQrr(_EDI, r))
# define jit_getprearg_pip_p(r) (MOVQrr(_ECX, r))
# define jit_getprearg_pipp_p(r) (MOVQrr(JIT_R(8), r))
#else
# define jit_getprearg__p(r) (jit_ldxi_p(r, JIT_SP, 4))
# define jit_getprearg_pip_p(r) (jit_ldxi_p(r, JIT_SP, 16))
# define jit_getprearg_pipp_p(r) (jit_ldxi_p(r, JIT_SP, 20))
#endif
#ifdef JIT_X86_64
# define jit_base_prolog() (PUSHQr(_EBP), MOVQrr(_ESP, _EBP), PUSHQr(_EBX), PUSHQr(_R12), PUSHQr(_R13))
# define jit_prolog(n) (_jitl.nextarg_geti = 0, jit_base_prolog())
@ -359,7 +375,7 @@ struct jit_local_state {
#ifdef JIT_X86_64
/* Stack isn't used for arguments: */
# define jit_prepare_i(ni) (_jitl.argssize = 0)
# define jit_prepare_i(ni) (_jitl.argssize = (ni), _jitl.argpushes = _jitl.argssize)
#else
# ifdef _CALL_DARWIN
/* Stack must stay 16-byte aligned: */
@ -375,12 +391,13 @@ struct jit_local_state {
#define jit_prepare_f(nf) (_jitl.argssize += (nf))
#define jit_prepare_d(nd) (_jitl.argssize += 2 * (nd))
#ifdef JIT_X86_64
# define jit_pusharg_i(rs) (_jitl.argssize++, MOVQrr(rs, JIT_CALLTMPSTART + _jitl.argssize - 1))
# define jit_normal_pushonlyarg_i(rs) (_jitl.argssize++, MOVQrr(rs, _EDI))
# define jit_save_argstate(curstate) curstate = _jitl.argssize;
# define jit_restore_argstate(curstate) _jitl.argssize = curstate;
# define jit_pusharg_i(rs) (_jitl.argpushes--, MOVQrr(rs, JIT_CALLTMPSTART + _jitl.argpushes))
# define jit_normal_pushonlyarg_i(rs) (_jitl.argpushes--, MOVQrr(rs, _EDI))
# define jit_save_argstate(curstate) curstate = _jitl.argpushes;
# define jit_restore_argstate(curstate) _jitl.argpushes = curstate;
# define jit_finish(sub) (jit_shift_args(), (void)jit_calli((sub)), jit_restore_locals())
# define jit_normal_finish(sub) jit_calli((sub))
# define jit_return_pop_insn_len() 0
# define jit_reg_is_arg(reg) ((reg == _EDI) || (reg ==_ESI) || (reg == _EDX))
# define jit_finishr(reg) ((jit_reg_is_arg((reg)) ? MOVQrr(reg, JIT_REXTMP) : (void)0), \
jit_shift_args(), \
@ -389,12 +406,12 @@ struct jit_local_state {
/* R12 and R13 are callee-save, instead of EDI and ESI */
# define jit_shift_args() \
(MOVQrr(_ESI, _R12), MOVQrr(_EDI, _R13), \
(_jitl.argssize-- \
? (MOVQrr(JIT_CALLTMPSTART + _jitl.argssize, jit_arg_reg_order[0]), \
(_jitl.argssize-- \
? (MOVQrr(JIT_CALLTMPSTART + _jitl.argssize, jit_arg_reg_order[1]), \
(_jitl.argssize-- \
? MOVQrr(JIT_CALLTMPSTART, jit_arg_reg_order[2]) \
(_jitl.argssize \
? (MOVQrr(JIT_CALLTMPSTART, jit_arg_reg_order[0]), \
((_jitl.argssize > 1) \
? (MOVQrr(JIT_CALLTMPSTART + 1, jit_arg_reg_order[1]), \
((_jitl.argssize > 2) \
? MOVQrr(JIT_CALLTMPSTART + 2, jit_arg_reg_order[2]) \
: (void)0)) \
: (void)0)) \
: (void)0))
@ -407,6 +424,7 @@ struct jit_local_state {
# define jit_restore_argstate(curstate) _jitl.argssize = curstate;
# define jit_finish(sub) ((void)jit_calli((sub)), ADDLir(sizeof(long) * _jitl.argssize, JIT_SP), _jitl.argssize = 0)
# define jit_finishr(reg) (jit_callr((reg)), ADDLir(sizeof(long) * _jitl.argssize, JIT_SP), _jitl.argssize = 0)
# define jit_return_pop_insn_len() 3 /* size of ADDLir() */
# define jit_normal_finish(sub) jit_finish(sub)
#endif
#define jit_pusharg_l(rs) jit_pusharg_i(rs)
@ -418,7 +436,7 @@ struct jit_local_state {
#define jit_arg_l() (_jitl.nextarg_geti++)
#define jit_arg_p() (_jitl.nextarg_geti++)
#define jit_arg_reg(p) (jit_arg_reg_order[p])
static int jit_arg_reg_order[] = { _EDI, _ESI, _EDX, _ECX };
static const int const jit_arg_reg_order[] = { _EDI, _ESI, _EDX, _ECX };
#else
#define jit_arg_c() ((_jitl.framesize += sizeof(int)) - sizeof(int))
#define jit_arg_uc() ((_jitl.framesize += sizeof(int)) - sizeof(int))

View File

@ -3315,6 +3315,39 @@ static int mark_cont_mark_chain_FIXUP(void *p, struct NewGC *gc) {
#define mark_cont_mark_chain_IS_CONST_SIZE 1
static int mark_lightweight_cont_SIZE(void *p, struct NewGC *gc) {
return
gcBYTES_TO_WORDS(sizeof(Scheme_Lightweight_Continuation));
}
static int mark_lightweight_cont_MARK(void *p, struct NewGC *gc) {
Scheme_Lightweight_Continuation *lw = (Scheme_Lightweight_Continuation *)p;
gcMARK2(lw->saved_lwc, gc);
gcMARK2(lw->stack_slice, gc);
gcMARK2(lw->runstack_slice, gc);
gcMARK2(lw->cont_mark_stack_slice, gc);
return
gcBYTES_TO_WORDS(sizeof(Scheme_Lightweight_Continuation));
}
static int mark_lightweight_cont_FIXUP(void *p, struct NewGC *gc) {
Scheme_Lightweight_Continuation *lw = (Scheme_Lightweight_Continuation *)p;
gcFIXUP2(lw->saved_lwc, gc);
gcFIXUP2(lw->stack_slice, gc);
gcFIXUP2(lw->runstack_slice, gc);
gcFIXUP2(lw->cont_mark_stack_slice, gc);
return
gcBYTES_TO_WORDS(sizeof(Scheme_Lightweight_Continuation));
}
#define mark_lightweight_cont_IS_ATOMIC 0
#define mark_lightweight_cont_IS_CONST_SIZE 1
#endif /* FUN */
/**********************************************************************/
@ -5722,6 +5755,8 @@ static int future_MARK(void *p, struct NewGC *gc) {
gcMARK2(f->prev, gc);
gcMARK2(f->next, gc);
gcMARK2(f->next_waiting_atomic, gc);
gcMARK2(f->next_waiting_lwc, gc);
gcMARK2(f->suspended_lw, gc);
return
gcBYTES_TO_WORDS(sizeof(future_t));
}
@ -5748,6 +5783,8 @@ static int future_FIXUP(void *p, struct NewGC *gc) {
gcFIXUP2(f->prev, gc);
gcFIXUP2(f->next, gc);
gcFIXUP2(f->next_waiting_atomic, gc);
gcFIXUP2(f->next_waiting_lwc, gc);
gcFIXUP2(f->suspended_lw, gc);
return
gcBYTES_TO_WORDS(sizeof(future_t));
}

View File

@ -1334,6 +1334,19 @@ mark_cont_mark_chain {
gcBYTES_TO_WORDS(sizeof(Scheme_Cont_Mark_Chain));
}
mark_lightweight_cont {
mark:
Scheme_Lightweight_Continuation *lw = (Scheme_Lightweight_Continuation *)p;
gcMARK2(lw->saved_lwc, gc);
gcMARK2(lw->stack_slice, gc);
gcMARK2(lw->runstack_slice, gc);
gcMARK2(lw->cont_mark_stack_slice, gc);
size:
gcBYTES_TO_WORDS(sizeof(Scheme_Lightweight_Continuation));
}
END fun;
/**********************************************************************/
@ -2353,6 +2366,8 @@ future {
gcMARK2(f->prev, gc);
gcMARK2(f->next, gc);
gcMARK2(f->next_waiting_atomic, gc);
gcMARK2(f->next_waiting_lwc, gc);
gcMARK2(f->suspended_lw, gc);
size:
gcBYTES_TO_WORDS(sizeof(future_t));
}

View File

@ -2254,6 +2254,53 @@ typedef struct {
Scheme_Native_Closure_Data *scheme_generate_lambda(Scheme_Closure_Data *obj, int drop_code,
Scheme_Native_Closure_Data *case_lam);
typedef struct Scheme_Current_LWC {
/* !! All of these fields are treated as atomic by the GC !! */
Scheme_Object **runstack_start;
MZ_MARK_STACK_TYPE cont_mark_stack_start;
MZ_MARK_POS_TYPE cont_mark_pos_start;
void *stack_start;
Scheme_Object **runstack_end;
Scheme_Object **runstack_base_end;
MZ_MARK_STACK_TYPE cont_mark_stack_end;
MZ_MARK_POS_TYPE cont_mark_pos_end;
void *frame_end;
void *stack_end;
void *original_dest;
void *saved_v1;
double saved_save_fp;
} Scheme_Current_LWC;
void scheme_init_thread_lwc(void);
void scheme_fill_lwc_start(void);
void scheme_fill_lwc_end(void);
void scheme_fill_stack_lwc_end(void);
void scheme_clear_lwc(void);
THREAD_LOCAL_DECL(MZ_EXTERN Scheme_Current_LWC *scheme_current_lwc);
Scheme_Object *scheme_call_as_lightweight_continuation(Scheme_Closed_Prim *code,
void *data,
int argc,
Scheme_Object **argv);
void *scheme_save_lightweight_continuation_stack(Scheme_Current_LWC *lwc);
Scheme_Object *scheme_apply_lightweight_continuation_stack(Scheme_Current_LWC *lwc, void *stack,
Scheme_Object *result);
struct Scheme_Lightweight_Continuation;
typedef struct Scheme_Lightweight_Continuation Scheme_Lightweight_Continuation;
Scheme_Lightweight_Continuation *scheme_capture_lightweight_continuation(Scheme_Thread *p,
Scheme_Current_LWC *p_lwc,
void **storage);
Scheme_Object *scheme_apply_lightweight_continuation(Scheme_Lightweight_Continuation *captured,
Scheme_Object *result);
Scheme_Object **scheme_adjust_runstack_argument(Scheme_Lightweight_Continuation *captured,
Scheme_Object **arg);
int scheme_push_marks_from_thread(Scheme_Thread *p2, Scheme_Cont_Frame_Data *d);
int scheme_push_marks_from_lightweight_continuation(Scheme_Lightweight_Continuation *captured,
Scheme_Cont_Frame_Data *d);
#define scheme_new_frame(n) scheme_new_special_frame(n, 0)
#define scheme_extend_env(f, e) (f->basic.next = e, f)
#define scheme_next_frame(e) ((e)->basic.next)

View File

@ -257,6 +257,7 @@ enum {
scheme_rt_validate_clearing, /* 234 */
scheme_rt_rb_node, /* 235 */
scheme_rt_frozen_tramp, /* 236 */
scheme_rt_lightweight_cont, /* 237 */
#endif