diff --git a/racket/src/racket/gc2/block_cache.c b/racket/src/racket/gc2/block_cache.c
index 0f9ab8a729..f9f399a766 100644
--- a/racket/src/racket/gc2/block_cache.c
+++ b/racket/src/racket/gc2/block_cache.c
@@ -434,14 +434,19 @@ static void block_cache_protect_one_page(BlockCache* bc, void *p, size_t len, in
       GC_ASSERT(pos >= 0);
       GC_ASSERT(pos < (b->size >> LOG_APAGE_SIZE));
       GC_ASSERT(BD_MAP_GET_BIT(b->alloc_map, pos));
+      /* Since a queued mprotect affects more pages than the client can be sure of,
+         we have to accommodate redundant requests. */
       if (writeable) {
-        GC_ASSERT(BD_MAP_GET_BIT(b->protect_map, pos));
-        BD_MAP_UNSET_BIT(b->protect_map, pos);
+        if (BD_MAP_GET_BIT(b->protect_map, pos)) {
+          BD_MAP_UNSET_BIT(b->protect_map, pos);
+          os_protect_pages(p, len, writeable);
+        }
       } else {
-        GC_ASSERT(!BD_MAP_GET_BIT(b->protect_map, pos));
-        BD_MAP_SET_BIT(b->protect_map, pos);
+        if (!BD_MAP_GET_BIT(b->protect_map, pos)) {
+          BD_MAP_SET_BIT(b->protect_map, pos);
+          os_protect_pages(p, len, writeable);
+        }
       }
-      os_protect_pages(p, len, writeable);
     }
     break;
   default:
diff --git a/racket/src/racket/gc2/newgc.c b/racket/src/racket/gc2/newgc.c
index 42b0112352..8cc58863d1 100644
--- a/racket/src/racket/gc2/newgc.c
+++ b/racket/src/racket/gc2/newgc.c
@@ -4225,7 +4225,7 @@ static void mark_backpointers(NewGC *gc)
       GC_ASSERT(work->back_pointers);
 
       if (work->mprotected) {
-        /* expected only if QUEUED_MPROTECT_IS_PROMISCUOUS && AGE_GEN_0_TO_GEN_HALF(gc) */
+        /* expected only if QUEUED_MPROTECT_INFECTS_XXX && AGE_GEN_0_TO_GEN_HALF(gc) */
         work->mprotected = 0;
         mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work), page_mmu_type(work), &work->mmu_src_block);
       }
@@ -4909,7 +4909,7 @@ static void protect_old_pages(NewGC *gc)
         if (!page->mprotected && !page->back_pointers) {
           page->mprotected = 1;
           mmu_queue_write_protect_range(mmu, page->addr, real_page_size(page), page_mmu_type(page), &page->mmu_src_block);
-        } else if (QUEUED_MPROTECT_IS_PROMISCUOUS)
+        } else if (QUEUED_MPROTECT_INFECTS_SMALL)
           page->mprotected = 1;
       }
     }
@@ -4921,7 +4921,7 @@ static void protect_old_pages(NewGC *gc)
         if (!page->mprotected && !page->back_pointers) {
           page->mprotected = 1;
           mmu_queue_write_protect_range(mmu, page->addr, APAGE_SIZE, page_mmu_type(page), &page->mmu_src_block);
-        } else if (QUEUED_MPROTECT_IS_PROMISCUOUS)
+        } else if (QUEUED_MPROTECT_INFECTS_MED)
          page->mprotected = 1;
       }
     }
diff --git a/racket/src/racket/gc2/vm.c b/racket/src/racket/gc2/vm.c
index 92d22f7e40..53795283ce 100644
--- a/racket/src/racket/gc2/vm.c
+++ b/racket/src/racket/gc2/vm.c
@@ -28,10 +28,11 @@ enum {
 
 #ifdef USE_BLOCK_CACHE
 # define USE_ALLOC_CACHE
-# define QUEUED_MPROTECT_IS_PROMISCUOUS 1
+# define QUEUED_MPROTECT_INFECTS_SMALL 1
 #else
-# define QUEUED_MPROTECT_IS_PROMISCUOUS 0
+# define QUEUED_MPROTECT_INFECTS_SMALL 0
 #endif
+#define QUEUED_MPROTECT_INFECTS_MED 0
 
 /* Either USE_ALLOC_CACHE or OS_ALLOCATOR_NEEDS_ALIGNMENT must be
    enabled, unless the lower-level allocator's alignment matches