From 9cf6659bf0576fe28b1ba6f4b93bf90f136e72dc Mon Sep 17 00:00:00 2001
From: Matthew Flatt
Date: Wed, 23 Apr 2014 18:22:24 -0600
Subject: [PATCH] avoid write-barrier memory protection for 'atomic-interior allocation

Split 'atomic-interior allocation onto pages separate from other
'interior allocation, and do not page-protect 'atomic-interior memory.
Otherwise, atomic-interior memory passed from one place to another ---
especially via the `#:in-original-place?` option on foreign functions
--- can trigger a write barrier in the wrong place and crash. Commit
c18f6e8d6d, which changed some cross-place arguments to Pango and
Cairo to be 'atomic-interior, exposed the problem.

Some indentation is intentionally left unchanged to minimize the diff.

Merge to v6.0.1
---
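Background note (illustration only, not part of the commit): the GC's
write barrier here is implemented by write-protecting old-generation
pages and fielding the resulting access fault, which is why a write
from the wrong place can crash. The stand-alone sketch below shows the
general mechanism --- POSIX-specific, with hypothetical names, and not
Racket's GC code. The key point is in the handler: a fault on a page
that the local handler does not manage (for example, a page belonging
to another place's GC) cannot be recovered.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *guarded_page;      /* the one page this toy GC manages */
    static size_t page_size;
    static volatile sig_atomic_t page_dirty = 0;

    static void barrier_handler(int sig, siginfo_t *info, void *ctx)
    {
      char *addr = (char *)info->si_addr;
      (void)sig; (void)ctx;
      if (addr >= guarded_page && addr < guarded_page + page_size) {
        page_dirty = 1;  /* remember the mutated page for the next GC */
        /* mprotect is not formally async-signal-safe, but this is the
           standard trick for a page-protection write barrier */
        mprotect(guarded_page, page_size, PROT_READ | PROT_WRITE);
      } else {
        _exit(1);        /* fault on a page we don't manage: no recovery */
      }
    }

    int main(void)
    {
      struct sigaction sa;

      page_size = (size_t)sysconf(_SC_PAGESIZE);
      guarded_page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (guarded_page == MAP_FAILED) return 1;

      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = barrier_handler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &sa, NULL);

      /* "after a collection": write-protect the old-generation page */
      mprotect(guarded_page, page_size, PROT_READ);

      guarded_page[0] = 42;  /* faults once; handler marks page dirty */

      printf("dirty=%d value=%d\n", (int)page_dirty, guarded_page[0]);
      return 0;
    }

With this patch, 'atomic-interior pages are simply never protected, so
no such fault can occur for them.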
 .../scribblings/inside/memory.scrbl           | 13 ++-
 .../tests/racket/remote-atomic-write.rkt      | 26 ++++++
 racket/src/racket/gc2/newgc.c                 | 82 ++++++++++++-------
 racket/src/racket/gc2/newgc.h                 |  4 +-
 4 files changed, 94 insertions(+), 31 deletions(-)
 create mode 100644 pkgs/racket-pkgs/racket-test/tests/racket/remote-atomic-write.rkt

diff --git a/pkgs/racket-pkgs/racket-doc/scribblings/inside/memory.scrbl b/pkgs/racket-pkgs/racket-doc/scribblings/inside/memory.scrbl
index cb4e0c803e..575c37e475 100644
--- a/pkgs/racket-pkgs/racket-doc/scribblings/inside/memory.scrbl
+++ b/pkgs/racket-pkgs/racket-doc/scribblings/inside/memory.scrbl
@@ -41,7 +41,7 @@ The basic collector allocation functions are:
        that does not contain pointers to collectable objects. If the
        memory does contain pointers, they are invisible to the collector
        and will not prevent an object from being collected. Newly allocated atomic
-       memory is not necessary zeroed.
+       memory is not necessarily zeroed.
 
        Atomic memory is used for strings or other blocks of memory which
        do not contain pointers. Atomic memory can also be used to store
@@ -117,6 +117,17 @@
 allocator, on anytime that Racket has control, except during functions
 that are documented otherwise. The predicate and accessor macros
 listed in @secref["im:stdtypes"] never trigger a collection.
 
+As described in @secref["im:3m:places"], different @|tech-place|s
+manage allocation separately. Movable memory should not be
+communicated from one place to another, since the source place might
+move the memory before it is used in the destination place.
+Furthermore, allocated memory that contains pointers must not be
+written in a @|tech-place| other than the one where it is allocated,
+due to the place-specific implementation of a write barrier for
+generational garbage collection. No write barrier is used for memory
+that is allocated by @cppi{scheme_malloc_atomic_allow_interior} to
+contain no pointers.
+
 @; ----------------------------------------------------------------------
 
 @section[#:tag "im:3m"]{Cooperating with 3m}
diff --git a/pkgs/racket-pkgs/racket-test/tests/racket/remote-atomic-write.rkt b/pkgs/racket-pkgs/racket-test/tests/racket/remote-atomic-write.rkt
new file mode 100644
index 0000000000..18a168c0cb
--- /dev/null
+++ b/pkgs/racket-pkgs/racket-test/tests/racket/remote-atomic-write.rkt
@@ -0,0 +1,26 @@
+#lang racket/base
+(require ffi/unsafe
+         racket/place)
+
+;; Check that atomic memory allocated in one place can be written
+;; in another place without triggering a write barrier (and crashing)
+
+(define c-memset
+  (get-ffi-obj 'memset #f
+               (_fun #:in-original-place? #t _pointer _int _size -> _void)))
+
+(define N 50)
+
+(define (go)
+  (place
+   ch
+   (define s (malloc N 'atomic-interior))
+   (collect-garbage)
+   (c-memset s 65 N)
+   (place-channel-put ch (ptr-ref s _byte (quotient N 2)))))
+
+(module+ main
+  (define p (go))
+  (unless (equal? 65 (place-channel-get p))
+    (error "crash"))
+  'ok)
diff --git a/racket/src/racket/gc2/newgc.c b/racket/src/racket/gc2/newgc.c
index 936a906fb2..fb7c3f5d92 100644
--- a/racket/src/racket/gc2/newgc.c
+++ b/racket/src/racket/gc2/newgc.c
@@ -97,10 +97,17 @@ enum {
   PAGE_TARRAY = 3,
   PAGE_PAIR = 4,
   PAGE_BIG = 5,
-  /* the number of page types. */
+  /* the number of page types: */
   PAGE_TYPES = 6,
 };
 
+enum {
+  MED_PAGE_NONATOMIC = 0,
+  MED_PAGE_ATOMIC = 1,
+  /* the number of medium-page types: */
+  MED_PAGE_TYPES = 2
+};
+
 enum {
   SIZE_CLASS_SMALL_PAGE = 0,
   SIZE_CLASS_MED_PAGE = 1,
@@ -1041,12 +1048,16 @@ static void *allocate_big(const size_t request_size_bytes, int type)
   }
 }
 #define MED_NEXT_SEARCH_SLOT(page) ((page)->previous_size)
-inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int pos) {
+inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int pos, int type) {
   mpage *page;
-  int n;
+  int n, ty;
+
+  ty = ((type == PAGE_ATOMIC) ? MED_PAGE_ATOMIC : MED_PAGE_NONATOMIC);
 
   page = malloc_mpage();
-  page->addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE, MMU_ZEROED, MMU_BIG_MED, MMU_PROTECTABLE, &page->mmu_src_block);
+  page->addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE, MMU_ZEROED, MMU_BIG_MED,
+                            (ty == MED_PAGE_NONATOMIC) ? MMU_PROTECTABLE : MMU_NON_PROTECTABLE,
+                            &page->mmu_src_block);
   page->size = sz;
   page->size_class = 1;
   page->page_type = PAGE_BIG;
@@ -1061,11 +1072,11 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p
   }
 
   /* push page onto linked list */
-  page->next = gc->med_pages[pos];
+  page->next = gc->med_pages[ty][pos];
   if (page->next)
     page->next->prev = page;
-  gc->med_pages[pos] = page;
-  gc->med_freelist_pages[pos] = page;
+  gc->med_pages[ty][pos] = page;
+  gc->med_freelist_pages[ty][pos] = page;
 
   if (gc->saved_allocator) { /* see MESSAGE ALLOCATION above */
     orphan_page_accounting(gc, APAGE_SIZE);
@@ -1079,10 +1090,12 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p
 }
 
 inline static void *medium_page_realloc_dead_slot(NewGC *gc, const int sz, const int pos, const int type) {
-  int n;
+  int n, ty;
   mpage *page;
 
-  for (page = gc->med_freelist_pages[pos]; page; page = gc->med_freelist_pages[pos] = page->prev) {
+  ty = ((type == PAGE_ATOMIC) ? MED_PAGE_ATOMIC : MED_PAGE_NONATOMIC);
+
+  for (page = gc->med_freelist_pages[ty][pos]; page; page = gc->med_freelist_pages[ty][pos] = page->prev) {
     for (n = MED_NEXT_SEARCH_SLOT(page); ((n + sz) <= APAGE_SIZE); n += sz) {
       objhead * info = (objhead *)PTR(NUM(page->addr) + n);
       if (info->dead) {
@@ -1152,7 +1165,7 @@ static void *allocate_medium(const size_t request_size_bytes, const int type)
       mpage *page;
       objhead *info;
 
-      page = create_new_medium_page(gc, sz, pos);
+      page = create_new_medium_page(gc, sz, pos, type);
 
       info = (objhead *)PTR(NUM(page->addr) + MED_NEXT_SEARCH_SLOT(page));
       info->dead = 0;
@@ -1160,6 +1173,7 @@ static void *allocate_medium(const size_t request_size_bytes, const int type)
 
       objptr = OBJHEAD_TO_OBJPTR(info);
     }
+
 #if defined(GC_DEBUG_PAGES) && defined(MASTER_ALLOC_DEBUG)
   if (postmaster_and_master_gc(gc)) {
     GCVERBOSEprintf(gc, "MASTERGC_allocate_medium %zi %i %i %i %i %p\n", request_size_bytes, type, sz, pos, 1 << (pos +3), objptr);
@@ -3622,7 +3636,7 @@ void GC_dump_with_traces(int flags,
 {
   NewGC *gc = GC_get_GC();
   mpage *page;
-  int i, num_immobiles;
+  int i, ty, num_immobiles;
   GC_Immobile_Box *ib;
   static uintptr_t counts[MAX_DUMP_TAG], sizes[MAX_DUMP_TAG];
@@ -3683,8 +3697,9 @@ void GC_dump_with_traces(int flags,
       }
     }
   }
+  for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-    for (page = gc->med_pages[i]; page; page = page->next) {
+    for (page = gc->med_pages[ty][i]; page; page = page->next) {
       void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
       void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
@@ -3713,6 +3728,7 @@ void GC_dump_with_traces(int flags,
       }
     }
   }
+  }
 
   num_immobiles = 0;
   for (ib = gc->immobile_boxes; ib; ib = ib->next)
@@ -3750,11 +3766,12 @@ void GC_dump_with_traces(int flags,
                 type_name[i], total_use, count));
   }
 
-  GCWARN((GCOUTF, "Generation 1 [medium]:"));
+  for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
+    GCWARN((GCOUTF, "Generation 1 [medium%s]:", (ty == MED_PAGE_ATOMIC) ? " atomic" : ""));
   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-    if (gc->med_pages[i]) {
+    if (gc->med_pages[ty][i]) {
       intptr_t count = 0, page_count = 0;
-      for (page = gc->med_pages[i]; page; page = page->next) {
+      for (page = gc->med_pages[ty][i]; page; page = page->next) {
         void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
         void **end = PPTR(NUM(page->addr) + APAGE_SIZE - page->size);
@@ -3769,10 +3786,11 @@ void GC_dump_with_traces(int flags,
         }
       }
       GCWARN((GCOUTF, " %" PRIdPTR " [%" PRIdPTR "/%" PRIdPTR "]",
-              count, page_count, gc->med_pages[i]->size));
+              count, page_count, gc->med_pages[ty][i]->size));
     }
   }
   GCWARN((GCOUTF, "\n"));
+  }
 
   GCWARN((GCOUTF,"\n"));
@@ -3864,7 +3882,7 @@ static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc)
   }
 
   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-    for (work = gc->med_pages[i]; work; work = work->next) {
+    for (work = gc->med_pages[MED_PAGE_NONATOMIC][i]; work; work = work->next) {
       if (work->generation) {
         reset_gen1_page(gc, work);
       }
@@ -3889,7 +3907,7 @@ static void mark_backpointers(NewGC *gc)
 {
   if(!gc->gc_full) {
     mpage *work;
-    int i;
+    int i, ty;
     PageMap pagemap = gc->page_maps;
 
     /* if this is not a full collection, then we need to mark any pointers
@@ -3934,8 +3952,9 @@ static void mark_backpointers(NewGC *gc)
       }
     }
 
+    for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
    for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-      for (work = gc->med_pages[i]; work; work = work->next) {
+      for (work = gc->med_pages[ty][i]; work; work = work->next) {
        if(work->back_pointers) {
          void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
          void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
@@ -3943,6 +3962,7 @@ static void mark_backpointers(NewGC *gc)
          work->marked_on = 1;
          pagemap_add(pagemap, work);
 
+          if (ty == MED_PAGE_NONATOMIC) {
          while(start <= end) {
            objhead *info = (objhead *)start;
            if(!info->dead) {
@@ -3952,9 +3972,11 @@ static void mark_backpointers(NewGC *gc)
            }
            start += info->size;
          }
+          }
        }
       }
     }
+    }
   }
 }
@@ -4140,7 +4162,7 @@ static void killing_debug(NewGC *gc, mpage *page, objhead *info) {
 static void repair_heap(NewGC *gc)
 {
   mpage *page;
-  int i;
+  int i, ty;
   Fixup2_Proc *fixup_table = gc->fixup_table;
 #ifdef MZ_USE_PLACES
   int master_has_switched = postmaster_and_master_gc(gc);
@@ -4304,8 +4326,9 @@ static void repair_heap(NewGC *gc)
     }
   }
 
+  for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-    for (page = gc->med_pages[i]; page; page = page->next) {
+    for (page = gc->med_pages[ty][i]; page; page = page->next) {
 #ifdef MZ_USE_PLACES
       if (master_has_switched || page->marked_on)
 #else
@@ -4357,6 +4380,7 @@ static void repair_heap(NewGC *gc)
       }
     }
   }
+  }
 }
 
 static inline void gen1_free_mpage(PageMap pagemap, mpage *page) {
@@ -4409,7 +4433,7 @@ inline static void gen0_free_big_pages(NewGC *gc) {
 
 static void clean_up_heap(NewGC *gc)
 {
-  int i;
+  int i, ty;
   uintptr_t memory_in_use = 0;
   PageMap pagemap = gc->page_maps;
@@ -4446,11 +4470,12 @@ static void clean_up_heap(NewGC *gc)
     }
   }
 
+  for (ty = 0; ty < MED_PAGE_TYPES; ty++) {
  for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
    mpage *work;
    mpage *prev = NULL, *next;
 
-    for (work = gc->med_pages[i]; work; work = next) {
+    for (work = gc->med_pages[ty][i]; work; work = next) {
      if (work->marked_on) {
        void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
        void **end = PPTR(NUM(work->addr) + APAGE_SIZE - work->size);
@@ -4475,7 +4500,7 @@ static void clean_up_heap(NewGC *gc)
          prev = work;
        } else {
          /* free the page */
-          if(prev) prev->next = next; else gc->med_pages[i] = next;
+          if(prev) prev->next = next; else gc->med_pages[ty][i] = next;
          if(next) work->next->prev = prev;
          GCVERBOSEPAGE(gc, "Cleaning up MED PAGE NO OBJ", work);
          gen1_free_mpage(pagemap, work);
@@ -4484,7 +4509,7 @@ static void clean_up_heap(NewGC *gc)
        /* Page wasn't touched in full GC, or gen-0 not touched,
           so we can free it. */
        next = work->next;
-        if(prev) prev->next = next; else gc->med_pages[i] = next;
+        if(prev) prev->next = next; else gc->med_pages[ty][i] = next;
        if(next) work->next->prev = prev;
        GCVERBOSEPAGE(gc, "Cleaning up MED NO MARKEDON", work);
        gen1_free_mpage(pagemap, work);
@@ -4498,7 +4523,8 @@ static void clean_up_heap(NewGC *gc)
        pagemap_add(pagemap, work);
      }
    }
-    gc->med_freelist_pages[i] = prev;
+    gc->med_freelist_pages[ty][i] = prev;
+  }
  }
 
  memory_in_use = add_no_overflow(memory_in_use, gc->phantom_count);
@@ -4528,7 +4554,7 @@ static void unprotect_old_pages(NewGC *gc)
   }
 
   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-    for (page = gc->med_pages[i]; page; page = page->next) {
+    for (page = gc->med_pages[MED_PAGE_NONATOMIC][i]; page; page = page->next) {
       if (page->mprotected) {
        page->mprotected = 0;
        mmu_queue_write_unprotect_range(mmu, page->addr, real_page_size(page), page_mmu_type(page), &page->mmu_src_block);
@@ -4564,7 +4590,7 @@ static void protect_old_pages(NewGC *gc)
   }
 
   for (i = 0; i < NUM_MED_PAGE_SIZES; i++) {
-    for (page = gc->med_pages[i]; page; page = page->next) {
+    for (page = gc->med_pages[MED_PAGE_NONATOMIC][i]; page; page = page->next) {
       if (!page->mprotected) {
         page->back_pointers = 0;
         page->mprotected = 1;
diff --git a/racket/src/racket/gc2/newgc.h b/racket/src/racket/gc2/newgc.h
index 1aad1efc05..df240f74be 100644
--- a/racket/src/racket/gc2/newgc.h
+++ b/racket/src/racket/gc2/newgc.h
@@ -142,8 +142,8 @@ typedef struct NewGC {
   /* All non-gen0 pages are held in the following structure. */
   struct mpage *gen1_pages[PAGE_TYPES];
 
-  struct mpage *med_pages[NUM_MED_PAGE_SIZES];
-  struct mpage *med_freelist_pages[NUM_MED_PAGE_SIZES];
+  struct mpage *med_pages[MED_PAGE_TYPES][NUM_MED_PAGE_SIZES];
+  struct mpage *med_freelist_pages[MED_PAGE_TYPES][NUM_MED_PAGE_SIZES];
 
   MarkSegment *mark_stack;