bc: fix potential GC problem with continuation invocation

Avoid updating the continuation-mark pointer until after the set of
mark segments is allocated when more segments are needed.
Matthew Flatt 2020-05-07 16:15:51 -06:00
parent aeb32081db
commit b8f5216376
2 changed files with 14 additions and 11 deletions
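
The ordering matters because allocating the new mark segments can itself trigger a GC; if the thread's continuation-mark pointer has already been advanced to the deeper stack, a collection at that moment can see a depth with no backing segment. Below is a minimal sketch of the ordering only, not the actual BC code: the names (Thread, ensure_segments, mark_depth, copy_in_marks) are simplified, hypothetical stand-ins for the identifiers in fun.c.

#include <stdlib.h>

#define SEG_SIZE 256

typedef struct Thread {
  long mark_depth;   /* a collector scans roughly mark_depth / SEG_SIZE segments */
  long seg_count;    /* number of segments actually allocated */
  long **segments;
} Thread;

/* Grow the segment array; in the real runtime, each allocation here
   is a point where a GC can run. */
static void ensure_segments(Thread *t, long needed) {
  if (needed > t->seg_count) {
    long **segs = (long **)realloc(t->segments, needed * sizeof(long *));
    long i;
    if (!segs) abort();
    for (i = t->seg_count; i < needed; i++)
      segs[i] = (long *)calloc(SEG_SIZE, sizeof(long));
    t->segments = segs;
    t->seg_count = needed;
  }
}

static void copy_in_marks(Thread *t, long new_depth) {
  long needed = (new_depth + SEG_SIZE - 1) / SEG_SIZE;

  /* Buggy ordering: publishing the new depth before the segments exist
     would let a GC inside ensure_segments() see a mark_depth that
     segments[] cannot back:
       t->mark_depth = new_depth;
       ensure_segments(t, needed);  */

  /* Ordering after this commit: allocate first, then update the pointer
     that the collector reads. */
  ensure_segments(t, needed);
  t->mark_depth = new_depth;
}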

@@ -4786,7 +4786,8 @@ static void copy_in_runstack(Scheme_Thread *p, Scheme_Saved_Stack *isaved, int s
 static void copy_in_mark_stack(Scheme_Thread *p, Scheme_Cont_Mark *cont_mark_stack_copied,
                                MZ_MARK_STACK_TYPE cms, MZ_MARK_STACK_TYPE base_cms,
                                intptr_t copied_offset, Scheme_Object **_sub_conts,
-                               int clear_caches)
+                               int clear_caches,
+                               MZ_MARK_POS_TYPE new_mark_pos)
 /* Copies in the mark stack up to depth cms, but assumes that the
    stack up to depth base_cms is already in place (probably in
    place for a dynamic-wind context in an continuation
@@ -4829,6 +4830,10 @@ static void copy_in_mark_stack(Scheme_Thread *p, Scheme_Cont_Mark *cont_mark_sta
     }
   }
 
+  /* Updated after potential GC: */
+  MZ_CONT_MARK_POS = new_mark_pos;
+  MZ_CONT_MARK_STACK = cms;
+
   if (_sub_conts) {
     if (*_sub_conts) {
       sub_cont = (Scheme_Cont *)SCHEME_CAR(*_sub_conts);
@@ -5322,12 +5327,11 @@ static MZ_MARK_STACK_TYPE exec_dyn_wind_pres(Scheme_Dynamic_Wind_List *dwl,
          dynamic-wind context. Clear cached info on restore
          if there's a prompt. */
       DW_PrePost_Proc pre = dwl->dw->pre;
-      MZ_CONT_MARK_POS = dwl->dw->envss.cont_mark_pos;
-      MZ_CONT_MARK_STACK = dwl->dw->envss.cont_mark_stack;
       copy_in_mark_stack(p, cont->cont_mark_stack_copied,
-                         MZ_CONT_MARK_STACK, copied_cms,
+                         dwl->dw->envss.cont_mark_stack, copied_cms,
                          cont->cont_mark_offset, _sub_conts,
-                         clear_cm_caches);
+                         clear_cm_caches,
+                         dwl->dw->envss.cont_mark_pos);
       copied_cms = MZ_CONT_MARK_STACK;
 
       if (!skip_dws)
@@ -5931,12 +5935,11 @@ static void restore_continuation(Scheme_Cont *cont, Scheme_Thread *p, int for_pr
 
   /* Finish copying cont mark stack back in. */
 
-  MZ_CONT_MARK_POS = cont->ss.cont_mark_pos;
-  MZ_CONT_MARK_STACK = cont->ss.cont_mark_stack;
   copy_in_mark_stack(p, cont->cont_mark_stack_copied,
-                     MZ_CONT_MARK_STACK, copied_cms,
+                     cont->ss.cont_mark_stack, copied_cms,
                      cont->cont_mark_offset, &sub_conts,
-                     clear_cm_caches);
+                     clear_cm_caches,
+                     cont->ss.cont_mark_pos);
 
   if (SAME_OBJ(result, SCHEME_MULTIPLE_VALUES)) {
     p->ku.multiple.array = mv;
@@ -6328,7 +6331,7 @@ void scheme_takeover_stacks(Scheme_Thread *p)
       op->cont_mark_stack_swapped = swapped;
     }
     *(p->cont_mark_stack_owner) = p;
-    copy_in_mark_stack(p, p->cont_mark_stack_swapped, MZ_CONT_MARK_STACK, 0, 0, NULL, 0);
+    copy_in_mark_stack(p, p->cont_mark_stack_swapped, MZ_CONT_MARK_STACK, 0, 0, NULL, 0, MZ_CONT_MARK_POS);
     p->cont_mark_stack_swapped = NULL;
   }
 }

@@ -9199,7 +9199,7 @@ static void prepare_thread_for_GC(Scheme_Object *t)
    /* zero unused part of last mark stack segment */
    segpos = ((intptr_t)p->cont_mark_stack >> SCHEME_LOG_MARK_SEGMENT_SIZE);
    if (segpos < p->cont_mark_seg_count) {
      Scheme_Cont_Mark *seg = p->cont_mark_stack_segments[segpos];
      int stackpos = ((intptr_t)p->cont_mark_stack & SCHEME_MARK_SEGMENT_MASK);