diff --git a/racket/src/ChezScheme/IMPLEMENTATION.md b/racket/src/ChezScheme/IMPLEMENTATION.md
index da8d976d5c..753a677ea3 100644
--- a/racket/src/ChezScheme/IMPLEMENTATION.md
+++ b/racket/src/ChezScheme/IMPLEMENTATION.md
@@ -985,8 +985,8 @@ machine-specific linking directives can appear. In the case of
 address), `arm32-call` (call an absolute address while setting the
 link register), and an `arm32-jump` (jump to an absolute address). These
 are turned into relocation entries associated with compiled code by steps
-in "compile.ss". Relocaiton entires are used when loding an GCing with
-update routines implemented in "fasl.c".
+in "compile.ss". Relocation entries are used when loading and GCing
+with update routines implemented in "fasl.c".
 
 Typically, a linking directive is written just after some code that is
 generated as installing a dummy value, and then the update routine in
diff --git a/racket/src/ChezScheme/c/alloc.c b/racket/src/ChezScheme/c/alloc.c
index 74ee47a0b0..a3afcfa658 100644
--- a/racket/src/ChezScheme/c/alloc.c
+++ b/racket/src/ChezScheme/c/alloc.c
@@ -36,6 +36,7 @@ void S_alloc_init() {
       S_G.main_thread_gc.base_loc[g][s] = FIX(0);
       S_G.main_thread_gc.next_loc[g][s] = FIX(0);
       S_G.main_thread_gc.bytes_left[g][s] = 0;
+      S_G.main_thread_gc.sweep_next[g][s] = NULL;
       S_G.bytes_of_space[g][s] = 0;
     }
   }
@@ -287,6 +288,7 @@ void S_close_off_thread_local_segment(ptr tc, ISPC s, IGEN g) {
   tgc->bytes_left[g][s] = 0;
   tgc->next_loc[g][s] = (ptr)0;
   tgc->sweep_loc[g][s] = (ptr)0;
+  tgc->sweep_next[g][s] = NULL;
 }
 
 /* S_reset_allocation_pointer is always called with allocation mutex
diff --git a/racket/src/ChezScheme/c/gc.c b/racket/src/ChezScheme/c/gc.c
index 78ba8c49c6..234ad5df76 100644
--- a/racket/src/ChezScheme/c/gc.c
+++ b/racket/src/ChezScheme/c/gc.c
@@ -126,7 +126,7 @@
    Parallel mode runs `sweep_generation` concurrently in multiple
    sweeper threads. It relies on a number of invariants:
 
-    * There are no attempts to take tc_mutex suring sweeping. To the
+    * There are no attempts to take tc_mutex during sweeping. To the
      degree that locking is needed (e.g., to allocate new segments),
      the allocation mutex is used. No other locks can be taken while
      that one is held.
@@ -167,7 +167,7 @@
    * Normally, a sweeper that encounters a remote reference can
      continue sweeping and eventually register the remote re-sweep.
 
-     An object is swept by only one sweeper at a time; if mmultiple
+     An object is swept by only one sweeper at a time; if multiple
      remote references to different sweepers are discovered in an
      object, it is sent to only one of the remote sweepers, and that
      sweeper will eventually send on the object to the other sweeper.
@@ -1907,7 +1907,7 @@ static iptr sweep_generation_pass(thread_gc *tgc) {
   for (from_g = MIN_TG; from_g <= MAX_TG; from_g += 1) {
 
     sweep_space(space_impure, from_g, {
-      /* only pairs in theses spaces in backreference mode */
+      /* only pairs in these spaces in backreference mode */
       FLUSH_REMOTE_BLOCK
       SET_BACKREFERENCE(TYPE(TO_PTR(pp), type_pair));
       relocate_impure_help(pp, p, from_g);
diff --git a/racket/src/ChezScheme/c/segment.c b/racket/src/ChezScheme/c/segment.c
index 307f42ceb8..cfbf1a4415 100644
--- a/racket/src/ChezScheme/c/segment.c
+++ b/racket/src/ChezScheme/c/segment.c
@@ -579,12 +579,11 @@ static void contract_segment_table(uptr base, uptr end) {
    Scheme rule that a foreign thread is allowed to invoke a callback
    (as long as the callback is immobile/locked) at any time --- even,
    say, while Scheme is collecting garbage and needs to write to
-   executable pages. However, on platforms where such a disposition
-   is enforced (eg. iOS), we provide a best-effort implementation that
-   flips pages between W and X for the minimal set of segments
-   possible (depending on the context) in an effort to minimize the
-   chances of a page being flipped while a thread is executing code
-   off of it.
+   executable pages. However, on platforms where W^X is enforced
+   (eg. iOS), we provide a best-effort implementation that flips pages
+   between W and X for the minimal set of segments possible (depending
+   on the context) in an effort to minimize the chances of a page
+   being flipped while a thread is executing code off of it.
 */
 
 void S_thread_start_code_write(WX_UNUSED ptr tc, WX_UNUSED IGEN maxg, WX_UNUSED IBOOL current, WX_UNUSED void *hint) {
@@ -659,12 +658,10 @@ static void enable_code_write(ptr tc, IGEN maxg, IBOOL on, IBOOL current, void *
   if (!on) {
     while ((sip = tgc->sweep_next[0][space_code]) != NULL) {
      tgc->sweep_next[0][space_code] = sip->sweep_next;
-      if (sip->generation == 0) {
-        addr = sip->sweep_start;
-        bytes = sip->sweep_bytes;
-        if (mprotect(addr, bytes, flags) != 0) {
-          S_error_abort("failed to protect recent allocation segments");
-        }
+      addr = sip->sweep_start;
+      bytes = sip->sweep_bytes;
+      if (mprotect(addr, bytes, flags) != 0) {
+        S_error_abort("failed to protect recent allocation segments");
       }
     }
   }
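
As an aside, the pattern that the final segment.c hunk simplifies (draining the
tgc->sweep_next list of recently allocated code segments and flipping each one's
protection with mprotect) can be illustrated by the standalone sketch below. It
is a minimal sketch, not Chez Scheme's actual implementation: seg_info,
alloc_code_segment, and sweep_next_head are hypothetical stand-ins, and only
mmap, mprotect, and sysconf are real POSIX calls. On platforms that pair W^X
with JIT restrictions (e.g., iOS), a real implementation would also need
MAP_JIT or equivalent entitlements, which this sketch omits.

/* Sketch: drain a list of writable code segments and flip them back to
   executable, in the spirit of enable_code_write() in the diff above.
   The seg_info type and helper names here are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

typedef struct seg_info {
  void *sweep_start;           /* page-aligned start of the segment */
  size_t sweep_bytes;          /* length in bytes (multiple of page size) */
  struct seg_info *sweep_next; /* next recently allocated segment */
} seg_info;

static seg_info *sweep_next_head = NULL;

/* Allocate one segment as RW: the "W" phase, where code is written. */
static seg_info *alloc_code_segment(size_t bytes) {
  void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); exit(1); }
  seg_info *sip = malloc(sizeof(seg_info));
  if (sip == NULL) { perror("malloc"); exit(1); }
  sip->sweep_start = p;
  sip->sweep_bytes = bytes;
  sip->sweep_next = sweep_next_head; /* push onto the pending list */
  sweep_next_head = sip;
  return sip;
}

/* The flip: pop every pending segment and make it R+X (the "X" phase).
   Under W^X, a page is never writable and executable at the same time. */
static void make_pending_segments_executable(void) {
  seg_info *sip;
  while ((sip = sweep_next_head) != NULL) {
    sweep_next_head = sip->sweep_next;
    if (mprotect(sip->sweep_start, sip->sweep_bytes,
                 PROT_READ | PROT_EXEC) != 0) {
      perror("mprotect");
      exit(1);
    }
    free(sip);
  }
}

int main(void) {
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  seg_info *sip = alloc_code_segment(page);
  memset(sip->sweep_start, 0, page);   /* writing is legal while RW */
  make_pending_segments_executable();  /* now R+X; writing would fault */
  printf("flipped one %zu-byte segment to R+X\n", page);
  return 0;
}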