From 41d1daf53cfd1a6c664ccd5fd0e9d263936f162c Mon Sep 17 00:00:00 2001 From: Kevin Tew Date: Tue, 1 Jun 2010 10:01:34 -0600 Subject: [PATCH] New mmu block layer to avoid uneeded mprotects --- src/racket/gc2/Makefile.in | 2 +- src/racket/gc2/alloc_cache.c | 110 +++--- src/racket/gc2/block_cache.c | 371 ++++++++++++++++++++ src/racket/gc2/gclist.h | 183 ++++++++++ src/racket/gc2/newgc.c | 624 ++++++++++++++++++++++----------- src/racket/gc2/newgc.h | 32 +- src/racket/gc2/page_range.c | 86 +++-- src/racket/gc2/protect_range.c | 45 --- src/racket/gc2/vm.c | 240 +++++++++++-- src/racket/gc2/vm_memalign.c | 10 +- src/racket/gc2/vm_mmap.c | 19 +- src/racket/gc2/vm_osk.c | 12 +- src/racket/gc2/vm_osx.c | 8 +- src/racket/gc2/vm_win.c | 14 +- src/racket/src/places.c | 2 +- 15 files changed, 1344 insertions(+), 414 deletions(-) create mode 100644 src/racket/gc2/block_cache.c create mode 100644 src/racket/gc2/gclist.h delete mode 100644 src/racket/gc2/protect_range.c diff --git a/src/racket/gc2/Makefile.in b/src/racket/gc2/Makefile.in index 8a6c49a756..1d41d49c12 100644 --- a/src/racket/gc2/Makefile.in +++ b/src/racket/gc2/Makefile.in @@ -323,6 +323,7 @@ main.@LTO@: $(XSRCDIR)/main.c gc2.@LTO@: \ $(srcdir)/alloc_cache.c \ + $(srcdir)/block_cache.c \ $(srcdir)/backtrace.c \ $(srcdir)/commongc_internal.h \ $(srcdir)/platforms.h \ @@ -351,7 +352,6 @@ gc2.@LTO@: \ $(srcdir)/newgc.c \ $(srcdir)/newgc.h \ $(srcdir)/page_range.c \ - $(srcdir)/protect_range.c \ $(srcdir)/rlimit_heapsize.c \ $(srcdir)/roots.c \ $(srcdir)/stack_comp.c \ diff --git a/src/racket/gc2/alloc_cache.c b/src/racket/gc2/alloc_cache.c index 3ac34b305a..bada4b3e79 100644 --- a/src/racket/gc2/alloc_cache.c +++ b/src/racket/gc2/alloc_cache.c @@ -1,43 +1,42 @@ /* Provides: - vm_malloc_pages --- usual interface - vm_free_pages --- usual interface - vm_flush_freed_pages --- usual interface + static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty) + static ssize_t void alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree) + static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff) Requires (defined earlier): - page_size --- in bytes my_qsort --- possibly from my_qsort.c + static void os_vm_free_pages(void *p, size_t len); + static void *os_vm_alloc_pages(size_t len); */ -/* interface to GC */ -/* -static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok); -static void vm_free_pages(void *p, size_t len); -static void vm_flush_freed_pages(void); -static void vm_protect_pages(void *p, size_t len, int writable); -*/ +/* Controls how often freed pages are actually returned to OS: */ +#define BLOCKFREE_UNMAP_AGE 3 -/* interface to OS */ -/* -static void os_vm_free_pages(void *p, size_t len); -static void *os_vm_alloc_pages(size_t len); -*/ -#define BLOCKFREE_UNMAP_AGE 1 +/* Controls size of the cache */ +#define BLOCKFREE_CACHE_SIZE 3000 -static int free_block_compare(const void *a, const void *b) +/* Controls how many extra pages are requested from OS at a time: */ +#define CACHE_SEED_PAGES 16 + +static AllocCacheBlock *alloc_cache_create() { + return ofm_malloc_zero(sizeof(AllocCacheBlock) * BLOCKFREE_CACHE_SIZE); +} + +static int alloc_cache_block_compare(const void *a, const void *b) { - if ((unsigned long)((FreeBlock *)a)->start < (unsigned long)((FreeBlock *)b)->start) + if ((unsigned long)((AllocCacheBlock *)a)->start < (unsigned long)((AllocCacheBlock *)b)->start) return -1; else return 1; } -static void 
alloc_cache_collapse_pages(FreeBlock *blockfree) +static void alloc_cache_collapse_pages(AllocCacheBlock *blockfree) { int i; int j; - /* sort by FreeBlock->start */ - my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(FreeBlock), free_block_compare); + /* sort by AllocCacheBlock->start */ + my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(AllocCacheBlock), alloc_cache_block_compare); /* collapse adjacent: */ j = 0; @@ -53,7 +52,7 @@ static void alloc_cache_collapse_pages(FreeBlock *blockfree) } } -inline static void *alloc_cache_find_pages(FreeBlock *blockfree, size_t len, size_t alignment, int dirty_ok) +inline static void *alloc_cache_find_pages(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok) { int i; void *r; @@ -103,14 +102,9 @@ inline static void *alloc_cache_find_pages(FreeBlock *blockfree, size_t len, siz return NULL; } -static void alloc_cache_return_mem(VM *vm, char *p, size_t len, int zeroed) +static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty) { int i; - FreeBlock *blockfree = vm->freeblocks; - - /* Round up to nearest page: */ - if (len & (page_size - 1)) - len += page_size - (len & (page_size - 1)); /* Try to free pages in larger blocks, since the OS may be slow. */ @@ -118,16 +112,16 @@ static void alloc_cache_return_mem(VM *vm, char *p, size_t len, int zeroed) if(blockfree[i].start && (blockfree[i].len < (1024 * 1024))) { if (p == blockfree[i].start + blockfree[i].len) { blockfree[i].len += len; - if (!zeroed) + if (dirty) blockfree[i].zeroed = 0; - return; + return 0; } if (p + len == blockfree[i].start) { blockfree[i].start = p; blockfree[i].len += len; - if (!zeroed) + if (dirty) blockfree[i].zeroed = 0; - return; + return 0; } } @@ -136,42 +130,36 @@ static void alloc_cache_return_mem(VM *vm, char *p, size_t len, int zeroed) blockfree[i].start = p; blockfree[i].len = len; blockfree[i].age = 0; - blockfree[i].zeroed = zeroed; - return; + blockfree[i].zeroed = !dirty; + return 0; } } /* Might help next time around: */ alloc_cache_collapse_pages(blockfree); - os_vm_free_pages(p, len); - - vm_memory_allocated_dec(vm, len); + os_free_pages(p, len); + return -len; } -static void vm_free_pages(VM *vm, void *p, size_t len) -{ - alloc_cache_return_mem(vm, p, len, 0); -} - -static void vm_flush_freed_pages(VM *vm) +static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree) { int i; - FreeBlock *blockfree = vm->freeblocks; - + ssize_t freed = 0; alloc_cache_collapse_pages(blockfree); for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) { if (blockfree[i].start) { if (blockfree[i].age == BLOCKFREE_UNMAP_AGE) { - os_vm_free_pages(blockfree[i].start, blockfree[i].len); - vm_memory_allocated_dec(vm, blockfree[i].len); + os_free_pages(blockfree[i].start, blockfree[i].len); + freed -= blockfree[i].len; blockfree[i].start = NULL; blockfree[i].len = 0; } else blockfree[i].age++; } } + return freed; } /* Instead of immediately freeing pages with munmap---only to mmap @@ -184,47 +172,41 @@ static void vm_flush_freed_pages(VM *vm) mechanism, but we do a bit of work to collapse adjacent pages in the cache. 
*/ -static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok) +static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff) { char *r; - FreeBlock *blockfree = vm->freeblocks; - - if (!page_size) - page_size = getpagesize(); - - /* Round up to nearest page: */ - if (len & (page_size - 1)) - len += page_size - (len & (page_size - 1)); /* Something from the cache, perhaps? */ r = alloc_cache_find_pages(blockfree, len, alignment, dirty_ok); if(!r) { /* attempt to allocate from OS */ - r = os_vm_alloc_pages(len + alignment); + size_t extra = alignment + CACHE_SEED_PAGES * APAGE_SIZE; + r = os_alloc_pages(len + extra); if(r == (void *)-1) { return NULL; } if (alignment) { /* We allocated too large so we can choose the alignment. */ - size_t extra = alignment; char *real_r = (char*)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1))); long pre_extra = real_r - r; /* in front extra */ - if (pre_extra) { os_vm_free_pages(r, pre_extra); } + if (pre_extra) { + /* printf("FREEING FRONT %p %lx\n", r, pre_extra); */ + os_free_pages(r, pre_extra); } /* in back extra exists */ if (pre_extra < extra) { if (pre_extra == 0) { /* Instead of actually unmapping, put it in the cache, and there's a good chance we can use it next time: */ - vm_memory_allocated_inc(vm, extra); - alloc_cache_return_mem(vm, real_r + len, extra, 1); + (*size_diff) += extra; + (*size_diff) += alloc_cache_free_page(blockfree, real_r + len, extra, 1); } - else { os_vm_free_pages(real_r + len, extra - pre_extra); } + else { os_free_pages(real_r + len, extra - pre_extra); } } r = real_r; } - vm_memory_allocated_inc(vm, len); + (*size_diff) += extra; } return r; diff --git a/src/racket/gc2/block_cache.c b/src/racket/gc2/block_cache.c new file mode 100644 index 0000000000..653ff0f995 --- /dev/null +++ b/src/racket/gc2/block_cache.c @@ -0,0 +1,371 @@ +/******************************************************************************/ +/* OS-specific low-level allocator */ +/******************************************************************************/ +#include "gclist.h" +/* requires: */ +static void *os_alloc_pages(size_t len); +static void os_free_pages(void *p, size_t len); +static void os_protect_pages(void *p, size_t len, int writable); + +#define BC_BLOCK_SIZE (1 << 24) /* 16 MB */ + +struct block_desc; +static AllocCacheBlock *alloc_cache_create(); +static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty); +static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree); +static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff); + +static Page_Range *page_range_create(); +static void page_range_flush(Page_Range *pr, int writeable); +static void page_range_add(Page_Range *pr, void *_start, unsigned long len, int writeable); + + +#ifdef BC_ASSERTS +static int block_cache_chain_stat(GCList *head, int *bcnt); +#endif + +struct block_group; +typedef struct block_desc { + GCList gclist; + void *block; + void *free; + long size; + long used; + long totalcnt; + long freecnt; + struct block_group *group; +} block_desc; + +typedef struct block_group { + GCList full; + GCList free; + int atomic; +} block_group; + +typedef struct BlockCache { + block_group atomic; + block_group non_atomic; + AllocCacheBlock *bigBlockCache; + Page_Range *page_range; + MMU *mmu; +} BlockCache; + +typedef struct pfree_list { + void *next; + int dirty; +} 
pfree_list; + +static BlockCache* block_cache_create(MMU *mmu) { + BlockCache *bc = ofm_malloc_zero(sizeof(BlockCache)); + gclist_init(&bc->atomic.full); + gclist_init(&bc->atomic.free); + bc->atomic.atomic = 1; + gclist_init(&bc->non_atomic.full); + gclist_init(&bc->non_atomic.free); + bc->atomic.atomic = 0; + bc->bigBlockCache = alloc_cache_create(); + bc->page_range = page_range_create(); + bc->mmu = mmu; + return bc; +} + +static void block_cache_free(BlockCache* bc) { + free(bc); +} + +static block_desc *bc_alloc_std_block(block_group *bg) { + void *r = os_alloc_pages(BC_BLOCK_SIZE); + void *ps = align_up_ptr(r, APAGE_SIZE); + + block_desc *bd = (block_desc*) ofm_malloc(sizeof(block_desc)); + bd->block = r; + bd->free = ps; + bd->size = BC_BLOCK_SIZE; + bd->used = 0; + bd->group = bg; + gclist_init(&bd->gclist); + + /* printf("ALLOC BLOCK %p-%p size %li %li %li %p\n", bd->block, bd->block + bd->size, bd->size, APAGE_SIZE, bd->size / APAGE_SIZE, bd->free); */ + /* free unaligned portion */ + { + long diff = ps -r; + if (diff) { + long enddiff = APAGE_SIZE - diff; + os_free_pages(r, diff); + os_free_pages(r + BC_BLOCK_SIZE - enddiff, enddiff); + bd->block = ps; + bd->size = BC_BLOCK_SIZE - APAGE_SIZE; + /* printf("UNALIGNED FROM OS %p %li %li\n", r, diff, enddiff); */ + } + } + + + /* setup free list of APAGE_SIZE sized pages inside block */ + { + int i = 0; + pfree_list *pe = (bd->block + bd->size); + pfree_list *p = ps; + pfree_list *n; + while(p < pe) { + n = ((void*) p) + APAGE_SIZE; + p->next = n; + p->dirty = 0; + p = n; + i++; + } + bd->totalcnt = i; + bd->freecnt = i; + if (p > pe) { p = (p - (2 * APAGE_SIZE)); } + else { p = (p - APAGE_SIZE); } + p->next = NULL; + /* printf("ENDUP %p %p %p %i\n", n, p, p->next, i); */ + } + + return bd; +} + +static void *bc_alloc_std_page(BlockCache *bc, int dirty_ok, int expect_mprotect, void **src_block, ssize_t *size_diff) { + block_group *bg = (expect_mprotect ? &bc->non_atomic : &bc->atomic); + GCList *free_head = &bg->free; + int newbl = 0; + + tryagain: + if (!gclist_is_empty(free_head)) { + if (!gclist_first_item(free_head, block_desc*, gclist)->free) { + gclist_move(free_head->next, &bg->full); + goto tryagain; + } + } + else { + newbl = 1; + block_desc *bd = bc_alloc_std_block(bg); + gclist_add(free_head, &(bd->gclist)); + (*size_diff) += bd->size; + /* printf("ALLOC BLOCK %i %p %p-%p size %li %p\n", expect_mprotect, bg, bd->block, bd->block + bd->size, bd->size, bd->free); */ + } + + { + block_desc *bd = gclist_first_item(free_head, block_desc*, gclist); + pfree_list *fl = bd->free; + void *p = fl; + bd->free = fl->next; + bd->freecnt--; + + *src_block = bd; + if (expect_mprotect) { + GC_MP_CNT_INC(mp_alloc_med_big_cnt); + os_protect_pages(p, APAGE_SIZE, 1); + } + + if (!dirty_ok) { + if (fl->dirty) + memset(p, 0, APAGE_SIZE); + else + fl->next = 0; + } + +#if BC_ASSERTS + assert(p >= bd->block); + assert(p+APAGE_SIZE <= bd->block + bd->size); + if (!bg->atomic) + { + int afub = 0; + int afrb = 0; + int nafub = 0; + int nafrb = 0; + int afu = block_cache_chain_stat(&bc->atomic.full, &afub); + int afr = block_cache_chain_stat(&bc->atomic.free, &afrb); + int nafu = block_cache_chain_stat(&bc->non_atomic.full, &nafub); + int nafr = block_cache_chain_stat(&bc->non_atomic.free, &nafrb); + printf("ALLOC PAGE %i %p %p-%p %03i %03i %04i %04i : %03i %03i %03i %03i %09i %s\n", expect_mprotect, bg, p, p + APAGE_SIZE, afu, afr, nafu, nafr, afub, afrb, nafub, nafrb, mmu_memory_allocated(bc->mmu), (newbl ? 
"NEW " : "")); + } +#endif + return p; + } +} + +static ssize_t bc_free_std_block(block_desc *b) { + ssize_t size_diff = 0; + gclist_del(&b->gclist); + os_free_pages(b->block, b->size); + size_diff -= b->size; + free(b); + return size_diff; +} + +static void *block_cache_alloc_page(BlockCache* bc, size_t len, size_t alignment, int dirty, int type, int expect_mprotect, void **src_block, ssize_t *size_diff) { + switch(type) { + case MMU_SMALL_GEN1: + return bc_alloc_std_page(bc, dirty, expect_mprotect, src_block, size_diff); + break; + default: + *(char**)src_block = (char*) ~0x0; + return alloc_cache_alloc_page(bc->bigBlockCache, len, APAGE_SIZE, dirty, size_diff); + break; + } +} + +#if BC_ASSERTS +static int find_addr_in_bd(GCList *head, void *p, char* msg) { + block_desc *b; + gclist_each_item(b, head, block_desc*, gclist) { + if (p >= b->block && p < b->block + b->size) { + return 1; + } + } + return 0; +} +#endif + +static ssize_t block_cache_free_page(BlockCache* bc, void *p, size_t len, int type, int expect_mprotect, void **src_block) { + switch(type) { + case MMU_SMALL_GEN1: + { + GCList *free_head = &((expect_mprotect ? &bc->non_atomic : &bc->atomic)->free); + block_desc *b = (block_desc*)(*src_block); + pfree_list *fl = p; + fl->next = b->free; + fl->dirty = 1; + b->free = fl; +#if BC_ASSERTS + assert(*src_block != (char*)~0x0); + assert(b->group == bg); +#endif + gclist_move(&b->gclist, free_head); + b->freecnt++; +#if BC_ASSERTS + if (!bg->atomic) + { + int afub = 0; + int afrb = 0; + int nafub = 0; + int nafrb = 0; + int afu = block_cache_chain_stat(&bc->atomic.full, &afub); + int afr = block_cache_chain_stat(&bc->atomic.free, &afrb); + int nafu = block_cache_chain_stat(&bc->non_atomic.full, &nafub); + int nafr = block_cache_chain_stat(&bc->non_atomic.free, &nafrb); + printf("FREE PAGE %i %p %p-%p %03i %03i %04i %04i : %03i %03i %03i %03i %09i\n", expect_mprotect, bg, p, p + APAGE_SIZE, afu, afr, nafu, nafr, afub, afrb, nafub, nafrb, mmu_memory_allocated(bc->mmu)); + } +#endif + return 0; + } + break; + default: +#if BC_ASSERTS + assert(!(find_addr_in_bd(&bc->atomic.full, p, "atomic full") || + find_addr_in_bd(&bc->atomic.free, p, "atomic freeblock") || + find_addr_in_bd(&bc->non_atomic.full, p, "non_atomic full") || + find_addr_in_bd(&bc->non_atomic.free, p, "non_atomic freeblock"))); + assert(*src_block == (char*)~0x0); +#endif + return alloc_cache_free_page(bc->bigBlockCache, p, len, MMU_DIRTY); + break; + } +} + +static int sort_full_to_empty(void *priv, GCList *a, GCList *b) { + block_desc *ba = gclist_item(a, block_desc*, gclist); + block_desc *bb = gclist_item(b, block_desc*, gclist); + + if ((ba->freecnt) <= (bb->freecnt)) { + return -1; + } + return 1; +} + +static void block_cache_prep_for_compaction(BlockCache* bc) { + gclist_sort(NULL, &bc->atomic.free, sort_full_to_empty); + gclist_sort(NULL, &bc->non_atomic.free, sort_full_to_empty); +#if 0 + { + block_desc *b; + gclist_each_item(b, &bc->atomic.free, block_desc*, gclist) { + printf(" ATOMIC %05li %03li %p\n", b->freecnt, b->totalcnt, b); } + gclist_each_item(b, &bc->non_atomic.free, block_desc*, gclist) { + printf("NONATOMIC %03li %03li %p\n", b->freecnt, b->totalcnt, b); + } + } +#endif +} + +static int block_cache_compact(void **src_block) { + block_desc *b = *src_block; + if (b->freecnt > (b->totalcnt/2)) { + return 1; + } + return 0; +} + +static ssize_t block_cache_flush_freed_pages(BlockCache* bc) { + block_desc *b; + block_desc *bn; + ssize_t size_diff = 0; + ssize_t alloc_cache_size_diff = 0; + + 
gclist_each_item_safe(b, bn, &bc->atomic.free, block_desc*, gclist) { + if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b); } + } + gclist_each_item_safe(b, bn, &bc->non_atomic.free, block_desc*, gclist) { + if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b); } + } + alloc_cache_size_diff = alloc_cache_flush_freed_pages(bc->bigBlockCache); + +#ifdef GC_MP_CNT + mp_bc_freed = -size_diff; + mp_ac_freed = -alloc_cache_size_diff; +#endif + + return size_diff + alloc_cache_size_diff; +} + +static void block_cache_queue_protect_range(BlockCache* bc, void *p, size_t len, int type, int writeable, void **src_block) { + switch(type) { + case MMU_SMALL_GEN1: +#if BC_ASSERTS + assert(!(find_addr_in_bd(&bc->atomic.full, p, "atomic full") || + find_addr_in_bd(&bc->atomic.free, p, "atomic freeblock"))); + assert(find_addr_in_bd(&bc->non_atomic.full, p, "non_atomic full") || + find_addr_in_bd(&bc->non_atomic.free, p, "non_atomic freeblock")); + assert(*src_block != (char*)~0x0); +#endif + return; + break; + default: +#if BC_ASSERTS + assert(*src_block == (char*)~0x0); +#endif + page_range_add(bc->page_range, p, len, writeable); + return; + break; + } +} + +static void block_cache_flush_protect_ranges(BlockCache* bc, int writeable) { + block_group *bg = &bc->non_atomic; + block_desc *b; + gclist_each_item(b, &bg->full, block_desc*, gclist) { + page_range_add(bc->page_range, b->block, b->size, writeable); + } + gclist_each_item(b, &bg->free, block_desc*, gclist) { + page_range_add(bc->page_range, b->block, b->size, writeable); + } + + page_range_flush(bc->page_range, writeable); +} + +#if BC_ASSERTS +static int block_cache_chain_stat(GCList *head, int *blcnt) { + block_desc *b; + int freecnt = 0; + gclist_each_item(b, head, block_desc*, gclist) { + pfree_list *fl; + int lfcnt = 0; + for (fl = b->free; fl; fl = fl->next) { + lfcnt++; + } + freecnt += lfcnt; + (*blcnt)++; + } + return freecnt; +} +#endif diff --git a/src/racket/gc2/gclist.h b/src/racket/gc2/gclist.h new file mode 100644 index 0000000000..778332e851 --- /dev/null +++ b/src/racket/gc2/gclist.h @@ -0,0 +1,183 @@ +#ifndef GCLIST_H +#define GCLIST_H + +/* design take form the linux double linked list implementation in include/linux/list.h */ + +typedef struct GCList { + struct GCList *next; + struct GCList *prev; +} GCList; + +#define GCLIST_HEAD(name) GCList name = { &(name), &(name) } + +static inline void gclist_init(GCList *list) { + list->next = list; + list->prev = list; +} + +static inline void __gclist_add(GCList *item, GCList *prev, GCList *next) { + next->prev = item; + item->next = next; + item->prev = prev; + prev->next = item; +} + +static inline void gclist_add(GCList *head, GCList *item) { + __gclist_add(item, head, head->next); +} + +static inline void gclist_add_tail(GCList *head, GCList *item) { + __gclist_add(item, head->prev, head); +} + +static inline void __gclist_del(GCList *prev, GCList *next) { + next->prev = prev; + prev->next = next; +} + +#define GCLIST_POISON1 ((void *)(0x00100100)) +#define GCLIST_POISON2 ((void *)(0x00200200)) +static inline void gclist_del(GCList *item) { + __gclist_del(item->prev, item->next); + item->next = GCLIST_POISON1; + item->prev = GCLIST_POISON2; +} + +static inline int gclist_is_last(GCList *head, GCList *list) { + return list->next == head; +} + +static inline int gclist_is_empty(GCList *head) { + return head->next == head; +} + +static inline void gclist_move(GCList *list, GCList *head) { + __gclist_del(list->prev, list->next); + gclist_add(head, list); +} 
+ +static inline void gclist_move_tail(GCList *list, GCList *head) { + __gclist_del(list->prev, list->next); + gclist_add(head, list); +} + +static inline void __gclist_splice(GCList *item, GCList *prev, GCList *next) { + abort(); +} + +static inline void gclist_splice(GCList *head, GCList *list) { + if(!gclist_is_empty(list)) { __gclist_splice(list, head, head->next); } +} + +#define gclist_item(ptr, type, member) \ + ((type) (((void*)(ptr)) - ((void *) (&(((type) 0x0)->member))))) + +#define gclist_first_item(head, type, member) \ + gclist_item((head)->next, type, member) + +#define gclist_each_item(pos, head, type, member) \ + for (pos = gclist_item((head)->next, type, member); \ + &pos->member != (head); \ + pos = gclist_item(pos->member.next, type, member)) + +#define gclist_each_item_safe(pos, n, head, type, member) \ + for (pos = gclist_item((head)->next, type, member), \ + n = gclist_item(pos->member.next, type, member); \ + &pos->member != (head); \ + pos = n, \ + n = gclist_item(n->member.next, type, member)) + +#endif + + +/* merge sort */ +typedef int (*GCListCmp)(void *priv, GCList *a, GCList *b); +#define MAX_LIST_LENGTH_BITS 20 + +static GCList *merge(void *priv, GCListCmp cmp, GCList *a, GCList *b) { + GCList head; + GCList *tail = &head; + + while(a && b) { + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a = a->next; + } + else { + tail->next = b; + b = b->next; + } + tail = tail->next; + }; + + tail->next = a?:b; + return head.next; +} + +static void merge_and_restore_back_links(void *priv, GCListCmp cmp, GCList *head, GCList *a, GCList *b) { + GCList *tail = head; + while(a && b) { + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a->prev = tail; + a = a->next; + } + else { + tail->next = b; + b->prev = tail; + b = b->next; + } + tail = tail->next; + } + + tail->next = a?:b; + + do { + tail->next->prev = tail; + tail = tail->next; + } while(tail->next); + + tail->next = head; + head->prev = tail; +} + +static void gclist_sort(void *priv, GCList *head, GCListCmp cmp) { + GCList *part[MAX_LIST_LENGTH_BITS+1]; + int level; /* index into part[] */ + int max_level = 0; + GCList *list; + + if (gclist_is_empty(head)) return; + + memset(part, 0, sizeof(part)); + + head->prev->next = NULL; /* set end of list NULL */ + list = head->next; /* set list to first item in list */ + + while(list) { + GCList *cur = list; + list = list->next; + cur->next = NULL; + + for (level = 0; part[level]; level++) { + cur = merge(priv, cmp, part[level], cur); + part[level] = NULL; + } + if (level > max_level) { + if (level > MAX_LIST_LENGTH_BITS) { + printf("GCList is too long to sort"); + abort(); + } + max_level = level; + } + part[level] = cur; + } + + for (level = 0; level < max_level; level ++) { + if (part[level]) { + list = merge(priv, cmp, part[level], list); + } + } + + merge_and_restore_back_links(priv, cmp, head, part[max_level], list); +} diff --git a/src/racket/gc2/newgc.c b/src/racket/gc2/newgc.c index 006b7fd7e8..be1579fe2f 100644 --- a/src/racket/gc2/newgc.c +++ b/src/racket/gc2/newgc.c @@ -27,6 +27,27 @@ the nursery and pages being compacted. 
*/ +/* #define GC_MP_CNT */ +/* GC MProtect Counters */ +#ifdef GC_MP_CNT +int mp_write_barrier_cnt; +int mp_mark_cnt; +int mp_alloc_med_big_cnt; +int mp_pr_add_cnt; +int mp_pr_call_cnt; +int mp_pr_ff_cnt; +int mp_gc_unprotect_cnt; +int mp_gc_protect_cnt; +int mp_gcs_cnt; +long mp_prev_compact_cnt; +long mp_compact_cnt; +long mp_bc_freed; +long mp_ac_freed; +#define GC_MP_CNT_INC(x) ((x)++) +#else +#define GC_MP_CNT_INC(x) /* empty */ +#endif + #define MZ_PRECISE_GC /* required for mz includes to work right */ #include #include @@ -83,6 +104,23 @@ enum { SIZE_CLASS_BIG_PAGE_MARKED = 3, }; +enum { + MMU_ZEROED = 0, + MMU_DIRTY = 1, +}; + +enum { + MMU_SMALL_GEN1 = 0, + MMU_BIG_MED = 1, + MMU_SMALL_GEN0 = 1, +}; + +enum { + MMU_NON_PROTECTABLE = 0, + MMU_PROTECTABLE = 1, +}; + + static const char *type_name[PAGE_TYPES] = { "tagged", "atomic", @@ -92,7 +130,6 @@ static const char *type_name[PAGE_TYPES] = { "big" }; - #include "newgc.h" THREAD_LOCAL_DECL(static NewGC *GC_instance); @@ -115,45 +152,41 @@ inline static int postmaster_and_master_gc(NewGC *gc) { inline static int postmaster_and_place_gc(NewGC *gc) { return (MASTERGC && gc != MASTERGC); } -static void master_collect_initiate(); +static void master_collect_initiate(NewGC *gc); #endif -#if defined(MZ_USE_PLACES) -/* -# define DEBUG_GC_PAGES -# define MASTER_ALLOC_DEBUG -# define KILLING_DEBUG -*/ -#endif - -#if defined(MZ_USE_PLACES) && defined(DEBUG_GC_PAGES) +#if defined(MZ_USE_PLACES) && defined(GC_DEBUG_PAGES) inline static size_t real_page_size(mpage* page); -static FILE *GCVERBOSEFH; -static FILE* gcdebugOUT() { - if (GCVERBOSEFH) { fflush(GCVERBOSEFH); } - else { GCVERBOSEFH = fopen("GCDEBUGOUT", "w"); } - return GCVERBOSEFH; +static FILE* gcdebugOUT(NewGC *gc) { + + if (gc->GCVERBOSEFH) { fflush(gc->GCVERBOSEFH); } + else { + char buf[50]; + sprintf(buf, "GCDEBUGOUT_%i", gc->place_id); + gc->GCVERBOSEFH = fopen(buf, "w"); + } + return gc->GCVERBOSEFH; } -static void GCVERBOSEprintf(const char *fmt, ...) { +static void GCVERBOSEprintf(NewGC *gc, const char *fmt, ...) { va_list ap; va_start(ap, fmt); - vfprintf(gcdebugOUT(), fmt, ap); + vfprintf(gcdebugOUT(gc), fmt, ap); va_end(ap); } -static void GCVERBOSEPAGE(const char *msg, mpage* page) { - NewGC *gc = GC_get_GC(); - if(postmaster_and_master_gc(gc)) { - GCVERBOSEprintf("%s %p: %p %p %p\n", msg, gc, page, page->addr, (void*)((long)page->addr + real_page_size(page))); - } +static void GCVERBOSEPAGE(NewGC *gc, const char *msg, mpage* page) { + GCVERBOSEprintf(gc, "%s %p: %p %p %p\n", msg, gc, page, page->addr, (void*)((long)page->addr + real_page_size(page))); } # ifdef KILLING_DEBUG static void killing_debug(NewGC *gc, mpage *page, objhead *info); +static void fprintf_debug(NewGC *gc, mpage *page, const char *msg, objhead *info, FILE* file, int check); # endif #else -# define GCVERBOSEPAGE(msg, page) /* EMPTY */ +# define GCVERBOSEPAGE(gc, msg, page) /* EMPTY */ +static void GCVERBOSEprintf(NewGC *gc, const char *fmt, ...) 
{ +} #endif @@ -298,34 +331,21 @@ inline static void check_used_against_max(NewGC *gc, size_t len) } } -#include "page_range.c" - #include "vm.c" -#include "protect_range.c" - -static void *malloc_pages(NewGC *gc, size_t len, size_t alignment) +static void *malloc_pages(NewGC *gc, size_t len, size_t alignment, int dirty, int type, int expect_mprotect, void **src_block) { void *ptr; check_used_against_max(gc, len); - ptr = vm_malloc_pages(gc->vm, len, alignment, 0); + ptr = mmu_alloc_page(gc->mmu, len, alignment, dirty, type, expect_mprotect, src_block); if (!ptr) out_of_memory(); return ptr; } -static void *malloc_dirty_pages(NewGC *gc, size_t len, size_t alignment) -{ - void *ptr; - check_used_against_max(gc, len); - ptr = vm_malloc_pages(gc->vm, len, alignment, 1); - if (!ptr) out_of_memory(); - return ptr; -} - -static void free_pages(NewGC *gc, void *p, size_t len) +static void free_pages(NewGC *gc, void *p, size_t len, int type, int expect_mprotect, void **src_block) { gc->used_pages -= (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1); - vm_free_pages(gc->vm, p, len); + mmu_free_page(gc->mmu, p, len, type, expect_mprotect, src_block); } @@ -417,6 +437,108 @@ inline static mpage *pagemap_find_page(PageMap page_maps1, void *p) { #endif } +#if 0 +static void dump_page_map(NewGC *gc, const char *when) +{ +#ifdef SIXTY_FOUR_BIT_INTEGERS + unsigned long i; + unsigned long j; + unsigned long k; + PageMap page_maps1; + mpage ***page_maps2; + mpage **page_maps3; +#else + long i; +#endif + mpage *page; + + long skips = 0, did_one = 0; + + printf("Page map (%s):\n", when); + +#ifdef SIXTY_FOUR_BIT_INTEGERS + page_maps1 = gc->page_maps; + for (k=0; kpage_maps[i]; +#endif + if (page) { + char kind; + + while (skips) { + printf(" "); + skips--; + } + if (!page->generation) + kind = '0'; + else { + switch (page->page_type) { + case PAGE_TAGGED: + kind = 't'; + break; + case PAGE_ATOMIC: + kind = 'a'; + break; + case PAGE_ARRAY: + kind = 'r'; + break; + case PAGE_TARRAY: + kind = 'y'; + break; + case PAGE_XTAGGED: + kind = 'x'; + break; + default: + kind = '?'; + break; + } + } + + if (page->mprotected) { + if ((kind >= 'a') && (kind <= 'z')) + kind += 'A' - 'a'; + } + if (page->size_class) + kind += 1; + + printf("%c", kind); + + did_one = 1; + } else + skips++; + + if ((i & 63) == 63) { + if (did_one) { + printf("\n"); + did_one = 0; + } + skips = 0; + } +#ifdef SIXTY_FOUR_BIT_INTEGERS + } + } + } + } + } +#else + } +#endif +} +#else +static void dump_page_map(NewGC *gc, const char *when) +{ +} +#endif + + /* These procedures modify or use the page map. The page map provides us very fast mappings from pointers to the page the reside on, if any. The page map itself serves two important purposes: @@ -620,7 +742,7 @@ static inline void gc_if_needed_account_alloc_size(NewGC *gc, size_t allocate_si if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) { #ifdef MZ_USE_PLACES if (postmaster_and_master_gc(gc)) { - master_collect_initiate(); + master_collect_initiate(gc); } else { #endif @@ -674,17 +796,19 @@ static void *allocate_big(const size_t request_size_bytes, int type) We not only need APAGE_SIZE alignment, we need everything consisently mapped within an APAGE_SIZE segment. So round up. 
*/ - if (type == PAGE_ATOMIC) - addr = malloc_dirty_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE); - else - addr = malloc_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE); - + bpage = malloc_mpage(); + + if (type == PAGE_ATOMIC) + addr = malloc_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE, MMU_DIRTY, MMU_BIG_MED, MMU_NON_PROTECTABLE, &bpage->mmu_src_block); + else + addr = malloc_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE, MMU_ZEROED, MMU_BIG_MED, MMU_PROTECTABLE, &bpage->mmu_src_block); + bpage->addr = addr; bpage->size = allocate_size; bpage->size_class = 2; bpage->page_type = type; - GCVERBOSEPAGE("NEW BIG PAGE", bpage); + GCVERBOSEPAGE(gc, "NEW BIG PAGE", bpage); /* push new bpage onto GC->gen0.big_pages */ bpage->next = gc->gen0.big_pages; @@ -704,13 +828,13 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p int n; page = malloc_mpage(); - page->addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE); + page->addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE, MMU_ZEROED, MMU_BIG_MED, MMU_PROTECTABLE, &page->mmu_src_block); page->size = sz; page->size_class = 1; page->page_type = PAGE_BIG; MED_NEXT_SEARCH_SLOT(page) = PREFIX_SIZE; page->live_size = sz; - GCVERBOSEPAGE("NEW MED PAGE", page); + GCVERBOSEPAGE(gc, "NEW MED PAGE", page); for (n = MED_NEXT_SEARCH_SLOT(page); ((n + sz) <= APAGE_SIZE); n += sz) { objhead *info = (objhead *)PTR(NUM(page->addr) + n); @@ -811,10 +935,10 @@ static void *allocate_medium(const size_t request_size_bytes, const int type) objptr = OBJHEAD_TO_OBJPTR(info); } -#if defined(DEBUG_GC_PAGES) && defined(MASTER_ALLOC_DEBUG) +#if defined(GC_DEBUG_PAGES) && defined(MASTER_ALLOC_DEBUG) if (postmaster_and_master_gc(gc)) { - GCVERBOSEprintf("MASTERGC_allocate_medium %zi %i %i %i %i %p\n", request_size_bytes, type, sz, pos, 1 << (pos +3), objptr); - /* print_libc_backtrace(gcdebugOUT()); */ + GCVERBOSEprintf(gc, "MASTERGC_allocate_medium %zi %i %i %i %i %p\n", request_size_bytes, type, sz, pos, 1 << (pos +3), objptr); + /* print_libc_backtrace(gcdebugOUT(gc)); */ } #endif @@ -825,22 +949,22 @@ static void *allocate_medium(const size_t request_size_bytes, const int type) #define GEN0_ALLOC_SIZE(page) ((page)->previous_size) inline static mpage *gen0_create_new_nursery_mpage(NewGC *gc, const size_t page_size) { - mpage *newmpage; + mpage *page; - newmpage = malloc_mpage(); - newmpage->addr = malloc_dirty_pages(gc, page_size, APAGE_SIZE); - newmpage->size_class = 0; - newmpage->size = PREFIX_SIZE; - GEN0_ALLOC_SIZE(newmpage) = page_size; - pagemap_add_with_size(gc->page_maps, newmpage, page_size); - GCVERBOSEPAGE("NEW gen0", newmpage); + page = malloc_mpage(); + page->addr = malloc_pages(gc, page_size, APAGE_SIZE, MMU_DIRTY, MMU_SMALL_GEN0, MMU_NON_PROTECTABLE, &page->mmu_src_block); + page->size_class = 0; + page->size = PREFIX_SIZE; + GEN0_ALLOC_SIZE(page) = page_size; + pagemap_add_with_size(gc->page_maps, page, page_size); + GCVERBOSEPAGE(gc, "NEW gen0", page); - return newmpage; + return page; } -inline static void gen0_free_nursery_mpage(NewGC *gc, mpage *page, size_t page_size) { +inline static void gen0_free_nursery_mpage(NewGC *gc, mpage *page, const size_t page_size) { pagemap_remove_with_size(gc->page_maps, page, page_size); - free_pages(gc, page->addr, page_size); + free_pages(gc, page->addr, page_size, MMU_SMALL_GEN0, MMU_NON_PROTECTABLE, &page->mmu_src_block); free_mpage(page); } @@ -909,7 +1033,7 @@ inline static void *allocate(const size_t request_size, const int type) size_t 
allocate_size; unsigned long newptr; - if(request_size == 0) return zero_sized; + if(request_size == 0) return (void *) zero_sized; allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size); if(allocate_size > MAX_OBJECT_SIZE) return allocate_big(request_size, type); @@ -938,7 +1062,7 @@ inline static void *allocate(const size_t request_size, const int type) GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE; } /* WARNING: tries to avoid a collection but - * malloc_pages can cause a collection due to check_used_against_max */ + * gen0_create_new_mpage can cause a collection via malloc_pages due to check_used_against_max */ else if (gc->dumping_avoid_collection) { mpage *new_mpage = gen0_create_new_mpage(gc); @@ -953,7 +1077,7 @@ inline static void *allocate(const size_t request_size, const int type) } else { #ifdef INSTRUMENT_PRIMITIVES - LOG_PRIM_START(((void*)garbage_collect)); + LOG_PRIM_START(((void*)garbage_collect)); #endif garbage_collect(gc, 0, 0); @@ -1324,7 +1448,7 @@ static void backtrace_new_page(NewGC *gc, mpage *page) { /* This is a little wastefull for big pages, because we'll only use the first few words: */ - page->backtrace = (void **)malloc_pages(gc, APAGE_SIZE, APAGE_SIZE); + page->backtrace = (void **)malloc_pages(gc, APAGE_SIZE, APAGE_SIZE, MMU_ZEROED, MMU_BIGMED, MMU_NON_PROTECTABLE, &page->backtrace_page_src); } # define backtrace_new_page_if_needed(gc, page) if (!page->backtrace) backtrace_new_page(gc, page) @@ -1332,7 +1456,7 @@ static void backtrace_new_page(NewGC *gc, mpage *page) static void free_backtrace(mpage *page) { if (page->backtrace) - free_pages(GC, page->backtrace, APAGE_SIZE); + free_pages(GC, page->backtrace, APAGE_SIZE, MMU_BIG_MED, MMU_NON_PROTECTABLE, &page->backtrace_page_src); } static void *bt_source; @@ -1836,6 +1960,12 @@ int GC_merely_accounting() static inline size_t real_page_size(mpage *page) { return (page->size_class > 1) ? round_to_apage_size(page->size) : APAGE_SIZE; } +static inline int page_mmu_type(mpage *page) { + return (page->size_class >= 1) ? MMU_BIG_MED : MMU_SMALL_GEN1; +} +static inline int page_mmu_protectable(mpage *page) { + return (page->page_type == PAGE_ATOMIC) ? 
MMU_NON_PROTECTABLE : MMU_PROTECTABLE; +} static int designate_modified_gc(NewGC *gc, void *p) { @@ -1849,7 +1979,8 @@ static int designate_modified_gc(NewGC *gc, void *p) if(page) { if (!page->back_pointers) { page->mprotected = 0; - vm_protect_pages(page->addr, real_page_size(page), 1); + mmu_write_unprotect_page(gc->mmu, page->addr, real_page_size(page)); + GC_MP_CNT_INC(mp_write_barrier_cnt); page->back_pointers = 1; } /* For a single mutator thread, we shouldn't get here @@ -1881,7 +2012,7 @@ void GC_write_barrier(void *p) #include "sighand.c" #ifdef MZ_USE_PLACES -typedef enum { +enum { SIGNALED_BUT_NOT_REGISTERED = -3, REAPED_SLOT_AVAILABLE = -2, CREATED_BUT_NOT_REGISTERED = -1, @@ -1917,7 +2048,7 @@ static void NewGCMasterInfo_cleanup() { wait_if_master_in_progress and rendezvous for a master gc */ /* this is only called from the master so the cangc lock should already be held */ -static void master_collect_initiate() { +static void master_collect_initiate(NewGC *gc) { if (MASTERGC->major_places_gc == 0) { int i = 0; int size = MASTERGCINFO->size; @@ -1929,9 +2060,9 @@ static void master_collect_initiate() { void *signal_fd = MASTERGCINFO->signal_fds[i]; if (signal_fd < (void*) -2) { scheme_signal_received_at(signal_fd); -#if defined(DEBUG_GC_PAGES) +#if defined(GC_DEBUG_PAGES) printf("%i SIGNALED BUT NOT COLLECTED\n", i); - GCVERBOSEprintf("%i SIGNALED BUT NOT COLLECTED\n", i); + GCVERBOSEprintf(gc, "%i SIGNALED BUT NOT COLLECTED\n", i); #endif count++; } @@ -1948,9 +2079,9 @@ static void master_collect_initiate() { printf("GC2 count != MASTERGCINFO->alive %i %li\n", count, MASTERGCINFO->alive); abort(); } -#if defined(DEBUG_GC_PAGES) +#if defined(GC_DEBUG_PAGES) printf("Woke up %i places for MASTER GC\n", count); - GCVERBOSEprintf("Woke up %i places for MASTER GC\n", count); + GCVERBOSEprintf(gc, "Woke up %i places for MASTER GC\n", count); #endif } } @@ -1959,15 +2090,16 @@ static void collect_master() { NewGC *saved_gc; saved_gc = GC_switch_to_master_gc(); { -#if defined(DEBUG_GC_PAGES) +#if defined(GC_DEBUG_PAGES) + NewGC *gc = GC_get_GC(); printf("START MASTER COLLECTION\n"); - GCVERBOSEprintf("START MASTER COLLECTION\n"); + GCVERBOSEprintf(gc, "START MASTER COLLECTION\n"); #endif MASTERGC->major_places_gc = 0; garbage_collect(MASTERGC, 1, 0); -#if defined(DEBUG_GC_PAGES) +#if defined(GC_DEBUG_PAGES) printf("END MASTER COLLECTION\n"); - GCVERBOSEprintf("END MASTER COLLECTION\n"); + GCVERBOSEprintf(gc, "END MASTER COLLECTION\n"); #endif { @@ -1989,9 +2121,10 @@ static void wait_if_master_in_progress(NewGC *gc) { { if (MASTERGC->major_places_gc == 1) { MASTERGCINFO->ready++; -#if defined(DEBUG_GC_PAGES) +#if defined(GC_DEBUG_PAGES) printf("%i READY\n", gc->place_id); - GCVERBOSEprintf("%i READY\n", gc->place_id); + GCVERBOSEprintf(gc, "%i READY\n", gc->place_id); + GCVERBOSEprintf(gc, "START MASTER COLLECTION\n"); #endif /* don't count MASTERGC*/ if ((MASTERGCINFO->alive -1) == MASTERGCINFO->ready) { @@ -2016,10 +2149,12 @@ static void wait_if_master_in_progress(NewGC *gc) { case 0: /* wait on semaphore */ mzrt_sema_wait(MASTERGCINFO->wait_sema); + GCVERBOSEprintf(gc, "END MASTER COLLECTION\n"); break; case 1: /* Your the last one here. 
*/ collect_master(); + GCVERBOSEprintf(gc, "END MASTER COLLECTION\n"); break; default: printf("GC2 wait_if_master_in_progress invalid case, unreachable\n"); @@ -2108,8 +2243,7 @@ static void NewGC_initialize(NewGC *newgc, NewGC *parentgc) { newgc->page_maps = ofm_malloc_zero(PAGEMAP32_SIZE * sizeof (mpage*)); #endif - newgc->vm = vm_create(); - newgc->protect_range = ofm_malloc_zero(sizeof(Page_Range)); + newgc->mmu = mmu_create(newgc); newgc->generations_available = 1; newgc->last_full_mem_use = (20 * 1024 * 1024); @@ -2154,8 +2288,6 @@ static NewGC *init_type_tags_worker(NewGC *parentgc, int count, int pair, int mu GC_add_roots(&gc->park, (char *)&gc->park + sizeof(gc->park) + 1); GC_add_roots(&gc->park_save, (char *)&gc->park_save + sizeof(gc->park_save) + 1); - initialize_protect_page_ranges(gc->protect_range, malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE), APAGE_SIZE); - return gc; } @@ -2382,7 +2514,7 @@ void GC_mark2(const void *const_p, struct NewGC *gc) gc->gen0.big_pages = page->next; if(page->next) page->next->prev = page->prev; - GCVERBOSEPAGE("MOVING BIG PAGE TO GEN1", page); + GCVERBOSEPAGE(gc, "MOVING BIG PAGE TO GEN1", page); backtrace_new_page(gc, page); @@ -2482,13 +2614,15 @@ void GC_mark2(const void *const_p, struct NewGC *gc) work->marked_on = 1; if (work->mprotected) { work->mprotected = 0; - vm_protect_pages(work->addr, APAGE_SIZE, 1); + mmu_write_unprotect_page(gc->mmu, work->addr, APAGE_SIZE); + GC_MP_CNT_INC(mp_mark_cnt); } newplace = PTR(NUM(work->addr) + work->size); } else { + int protectable = (type == PAGE_ATOMIC) ? MMU_NON_PROTECTABLE : MMU_PROTECTABLE; /* Allocate and prep the page */ work = malloc_mpage(); - work->addr = malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE); + work->addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE, MMU_DIRTY, MMU_SMALL_GEN1, protectable, &work->mmu_src_block); work->generation = 1; work->page_type = type; work->size = work->previous_size = PREFIX_SIZE; @@ -2502,6 +2636,7 @@ void GC_mark2(const void *const_p, struct NewGC *gc) work->added = 1; gc->gen1_pages[type] = work; newplace = PAGE_TO_OBJHEAD(work); + GCVERBOSEPAGE(gc, "NEW SMALL GEN1 PAGE", work); } /* update the size */ @@ -2851,7 +2986,7 @@ void GC_dump_with_traces(int flags, GCWARN((GCOUTF,"Peak memory use after a collection: %li\n", gc->peak_memory_use)); GCWARN((GCOUTF,"Allocated (+reserved) page sizes: %li (+%li)\n", gc->used_pages * APAGE_SIZE, - vm_memory_allocated(gc->vm) - (gc->used_pages * APAGE_SIZE))); + mmu_memory_allocated(gc->mmu) - (gc->used_pages * APAGE_SIZE))); GCWARN((GCOUTF,"# of major collections: %li\n", gc->num_major_collects)); GCWARN((GCOUTF,"# of minor collections: %li\n", gc->num_minor_collects)); GCWARN((GCOUTF,"# of installed finalizers: %i\n", gc->num_fnls)); @@ -2898,9 +3033,9 @@ void *GC_next_tagged_start(void *p) static void reset_gen1_page(NewGC *gc, mpage *work) { - if (gc->generations_available && work->mprotected) { + if (gc->generations_available) { work->mprotected = 0; - add_protect_page_range(gc->protect_range, work->addr, real_page_size(work), APAGE_SIZE, 1); + mmu_queue_write_unprotect_range(gc->mmu, work->addr, real_page_size(work), page_mmu_type(work), &work->mmu_src_block); } } @@ -2908,6 +3043,9 @@ static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc) { mpage *work; int i; +#ifdef GC_MP_CNT + mp_gc_unprotect_cnt = mp_pr_add_cnt; +#endif GCDEBUG((DEBUGOUTF, "MAJOR COLLECTION - PREPPING PAGES - reset live_size, reset previous_size, unprotect.\n")); /* we need to make sure that previous_size for every page is reset, so 
@@ -2915,7 +3053,9 @@ static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc) for(i = 0; i < PAGE_TYPES; i++) { for(work = gc->gen1_pages[i]; work; work = work->next) { - reset_gen1_page(gc, work); + if(i != PAGE_ATOMIC && work->page_type != PAGE_ATOMIC) { + reset_gen1_page(gc, work); + } work->live_size = 0; work->previous_size = PREFIX_SIZE; } @@ -2929,17 +3069,10 @@ static void reset_gen1_pages_live_and_previous_sizes(NewGC *gc) } } - flush_protect_page_ranges(gc->protect_range, 1); -} - -static void remove_gen1_page_from_pagemap(NewGC *gc, mpage *work) -{ - if (gc->generations_available && work->back_pointers && work->mprotected) { - work->mprotected = 0; - add_protect_page_range(gc->protect_range, work->addr, real_page_size(work), APAGE_SIZE, 1); - } - pagemap_remove(gc->page_maps, work); - work->added = 0; + mmu_flush_write_unprotect_ranges(gc->mmu); +#ifdef GC_MP_CNT + mp_gc_unprotect_cnt = mp_pr_add_cnt - mp_gc_unprotect_cnt; +#endif } static void remove_all_gen1_pages_from_pagemap(NewGC *gc) @@ -2954,19 +3087,20 @@ static void remove_all_gen1_pages_from_pagemap(NewGC *gc) for(i = 0; i < PAGE_TYPES; i++) { for(work = gc->gen1_pages[i]; work; work = work->next) { - remove_gen1_page_from_pagemap(gc, work); + pagemap_remove(gc->page_maps, work); + work->added = 0; } } for (i = 0; i < NUM_MED_PAGE_SIZES; i++) { for (work = gc->med_pages[i]; work; work = work->next) { if (work->generation) { - remove_gen1_page_from_pagemap(gc, work); + pagemap_remove(gc->page_maps, work); + work->added = 0; } } } - - flush_protect_page_ranges(gc->protect_range, 1); + mmu_flush_write_unprotect_ranges(gc->mmu); } static void mark_backpointers(NewGC *gc) @@ -3045,9 +3179,8 @@ static void mark_backpointers(NewGC *gc) mpage *allocate_compact_target(NewGC *gc, mpage *work) { mpage *npage; - npage = malloc_mpage(); - npage->addr = malloc_dirty_pages(gc, APAGE_SIZE, APAGE_SIZE); + npage->addr = malloc_pages(gc, APAGE_SIZE, APAGE_SIZE, MMU_DIRTY, MMU_SMALL_GEN1, page_mmu_protectable(work), &npage->mmu_src_block); npage->previous_size = npage->size = PREFIX_SIZE; npage->generation = 1; npage->back_pointers = 0; @@ -3055,7 +3188,7 @@ mpage *allocate_compact_target(NewGC *gc, mpage *work) npage->page_type = work->page_type; npage->marked_on = 1; backtrace_new_page(gc, npage); - GCVERBOSEPAGE("NEW COMPACT PAGE", npage); + GCVERBOSEPAGE(gc, "NEW COMPACT PAGE", npage); /* Link in this new replacement page */ npage->prev = work; npage->next = work->next; @@ -3072,7 +3205,12 @@ mpage *allocate_compact_target(NewGC *gc, mpage *work) inline static void do_heap_compact(NewGC *gc) { int i; + int tic_tock = gc->num_major_collects % 2; PageMap pagemap = gc->page_maps; + mmu_prep_for_compaction(gc->mmu); +#ifdef GC_MP_CNT + mp_prev_compact_cnt = mp_compact_cnt; +#endif for(i = 0; i < PAGE_BIG; i++) { mpage *work = gc->gen1_pages[i], *prev, *npage; @@ -3087,7 +3225,8 @@ inline static void do_heap_compact(NewGC *gc) while(work) { if(work->marked_on && !work->has_new) { /* then determine if we actually want to do compaction */ - if(should_compact_page(gcWORDS_TO_BYTES(work->live_size),work->size)) { + if( tic_tock ? 
should_compact_page(gcWORDS_TO_BYTES(work->live_size),work->size) : + mmu_should_compact_page(gc->mmu, work->mmu_src_block)) { void **start = PAGE_START_VSS(work); void **end = PAGE_END_VSS(work); void **newplace; @@ -3118,11 +3257,16 @@ inline static void do_heap_compact(NewGC *gc) newplace = PPTR(NUM(npage->addr) + npage->size); } - if (npage->mprotected) { - npage->mprotected = 0; - vm_protect_pages(npage->addr, APAGE_SIZE, 1); +#if defined(GC_DEBUG_PAGES) + { + pagemap_add(pagemap, work); + fprintf(gcdebugOUT(gc), "Compacting from %p to %p \n", start+1, newplace+1); + fprintf_debug(gc, work, "Compacting", info, gcdebugOUT(gc), 0); } - +#endif +#ifdef GC_MP_CNT + mp_compact_cnt += gcWORDS_TO_BYTES(info->size); +#endif GCDEBUG((DEBUGOUTF,"Moving size %i object from %p to %p\n", gcWORDS_TO_BYTES(info->size), start+1, newplace+1)); memcpy(newplace, start, gcWORDS_TO_BYTES(info->size)); @@ -3132,7 +3276,7 @@ inline static void do_heap_compact(NewGC *gc) newplace += info->size; avail -= info->size; } - start += info->size; + start += info->size; } npage->size = NUM(newplace) - NUM(npage->addr); @@ -3175,39 +3319,39 @@ static void fprintf_buffer(FILE* file, char* buf, int l) { fprintf(file, "\n"); } -static void fprintf_debug(NewGC *gc, mpage *page, const char *msg, objhead *info, FILE* file, int isgc) { - if (!isgc || postmaster_and_master_gc(gc)) { - Scheme_Object *obj = OBJHEAD_TO_OBJPTR(info); - fprintf(file, "%s %p ot %i it %i im %i is %i is >> 3 %i %p %i\n", msg, obj, obj->type, info->type, info->mark, info->size, info->size >> 3, page, page->marked_on); - switch (obj->type) { - case scheme_unix_path_type: - if (pagemap_find_page(gc->page_maps, SCHEME_PATH_VAL(obj))) { - fprintf_buffer(file, SCHEME_PATH_VAL(obj), SCHEME_PATH_LEN(obj)); - } - else { - fprintf(file, "%p already freed and out of bounds\n", SCHEME_PATH_VAL(obj)); - } - break; - case scheme_symbol_type: - fprintf_buffer(file, SCHEME_SYM_VAL(obj), SCHEME_SYM_LEN(obj)); - break; - case scheme_resolved_module_path_type: - if (pagemap_find_page(gc->page_maps, SCHEME_PTR_VAL(obj))) { - /* - fprintf_debug(gc, page, "RMP ", OBJPTR_TO_OBJHEAD(SCHEME_PTR_VAL(obj)), file, isgc); - */ - } - else { - fprintf(file, "RMP %p already freed and out of bounds\n", SCHEME_PATH_VAL(obj)); - } - default: - fprintf_buffer(file, ((char *)obj), (info->size * WORD_SIZE) - sizeof(objhead)); - break; - } +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) +#define INFO_SIZE_BYTES(info) ((info->size * WORD_SIZE) - sizeof(objhead)) +static void fprintf_debug(NewGC *gc, mpage *page, const char *msg, objhead *info, FILE* file, int check) { + Scheme_Object *obj = OBJHEAD_TO_OBJPTR(info); + fprintf(file, "%s obj %p ot %i it %i im %i is %i is >> 3 %i page %p pmo %i\n", msg, obj, obj->type, info->type, info->mark, info->size, info->size >> 3, page, page->marked_on); + switch (obj->type) { + case scheme_unix_path_type: + if (pagemap_find_page(gc->page_maps, MIN(SCHEME_PATH_VAL(obj), INFO_SIZE_BYTES(info)))) { + fprintf_buffer(file, SCHEME_PATH_VAL(obj), SCHEME_PATH_LEN(obj)); + } + else { + fprintf(file, "%p already freed and out of bounds\n", SCHEME_PATH_VAL(obj)); + } + break; + case scheme_symbol_type: + fprintf_buffer(file, SCHEME_SYM_VAL(obj), MIN(SCHEME_SYM_LEN(obj), INFO_SIZE_BYTES(info))); + break; + case scheme_resolved_module_path_type: + if (pagemap_find_page(gc->page_maps, SCHEME_PTR_VAL(obj))) { + /* + fprintf_debug(gc, page, "RMP ", OBJPTR_TO_OBJHEAD(SCHEME_PTR_VAL(obj)), file, check); + */ + } + else { + fprintf(file, "RMP %p already freed and out of bounds\n", SCHEME_PATH_VAL(obj)); + } + default: + fprintf_buffer(file, ((char *)obj), (info->size * WORD_SIZE) - sizeof(objhead)); + break; } } static void killing_debug(NewGC *gc, mpage *page, objhead *info) { - fprintf_debug(gc, page, "killing", info, gcdebugOUT(), 1); + fprintf_debug(gc, page, "killing", info, gcdebugOUT(gc), 1); } #endif @@ -3264,6 +3408,11 @@ static void repair_heap(NewGC *gc) } #ifdef MZ_USE_PLACES } + else { +#ifdef KILLING_DEBUG + killing_debug(gc, page, info); +#endif + } #endif } else { void **start = PPTR(NUM(page->addr) + page->previous_size); @@ -3284,6 +3433,9 @@ static void repair_heap(NewGC *gc) fixup_table[tag](obj_start, gc); } else { info->dead = 1; +#ifdef KILLING_DEBUG + killing_debug(gc, page, info); +#endif } start += info->size; } @@ -3293,7 +3445,12 @@ static void repair_heap(NewGC *gc) objhead *info = (objhead *)start; if(info->mark) { info->mark = 0; - } else info->dead = 1; + } else { + info->dead = 1; +#ifdef KILLING_DEBUG + killing_debug(gc, page, info); +#endif + } start += info->size; } break; @@ -3308,6 +3465,9 @@ static void repair_heap(NewGC *gc) info->mark = 0; } else { info->dead = 1; +#ifdef KILLING_DEBUG + killing_debug(gc, page, info); +#endif start += size; } } @@ -3328,6 +3488,9 @@ static void repair_heap(NewGC *gc) start = PPTR(info) + size; } else { info->dead = 1; +#ifdef KILLING_DEBUG + killing_debug(gc, page, info); +#endif start += size; } } @@ -3338,7 +3501,12 @@ static void repair_heap(NewGC *gc) if(info->mark) { GC_fixup_xtagged(OBJHEAD_TO_OBJPTR(start)); info->mark = 0; - } else info->dead = 1; + } else { + info->dead = 1; +#ifdef KILLING_DEBUG + killing_debug(gc, page, info); +#endif + } start += info->size; } } @@ -3405,7 +3573,7 @@ static void repair_heap(NewGC *gc) static inline void gen1_free_mpage(PageMap pagemap, mpage *page) { pagemap_remove(pagemap, page); free_backtrace(page); - free_pages(GC_instance, page->addr, real_page_size(page)); + free_pages(GC_instance, page->addr, real_page_size(page), page_mmu_type(page), page_mmu_protectable(page), &page->mmu_src_block); free_mpage(page); } @@ -3416,7 +3584,7 @@ static inline void cleanup_vacated_pages(NewGC *gc) { /* Free pages vacated by compaction: */ while (pages) { mpage *next = pages->next; - GCVERBOSEPAGE("Cleaning up vacated", pages); + GCVERBOSEPAGE(gc, "Cleaning up vacated", pages); gen1_free_mpage(pagemap, pages); pages = next; } @@ -3429,11 
+3597,11 @@ inline static void gen0_free_big_pages(NewGC *gc) { PageMap pagemap = gc->page_maps; for(work = gc->gen0.big_pages; work; work = next) { - GCVERBOSEPAGE("FREEING BIG PAGE", work); + GCVERBOSEPAGE(gc, "FREEING BIG PAGE", work); next = work->next; pagemap_remove(pagemap, work); - free_pages(gc, work->addr, round_to_apage_size(work->size)); + free_pages(gc, work->addr, round_to_apage_size(work->size), MMU_SMALL_GEN0, MMU_NON_PROTECTABLE, &work->mmu_src_block); free_mpage(work); } @@ -3459,10 +3627,10 @@ static void clean_up_heap(NewGC *gc) /* remove work from list */ if(prev) prev->next = next; else gc->gen1_pages[i] = next; if(next) work->next->prev = prev; - GCVERBOSEPAGE("Cleaning up BIGPAGE", work); + GCVERBOSEPAGE(gc, "Cleaning up BIGPAGE", work); gen1_free_mpage(pagemap, work); } else { - GCVERBOSEPAGE("clean_up_heap BIG PAGE ALIVE", work); + GCVERBOSEPAGE(gc, "clean_up_heap BIG PAGE ALIVE", work); pagemap_add(pagemap, work); work->back_pointers = work->marked_on = 0; memory_in_use += work->size; @@ -3511,7 +3679,7 @@ static void clean_up_heap(NewGC *gc) /* free the page */ if(prev) prev->next = next; else gc->med_pages[i] = next; if(next) work->next->prev = prev; - GCVERBOSEPAGE("Cleaning up MED PAGE NO OBJ", work); + GCVERBOSEPAGE(gc, "Cleaning up MED PAGE NO OBJ", work); gen1_free_mpage(pagemap, work); } } else if (gc->gc_full || !work->generation) { @@ -3520,7 +3688,7 @@ static void clean_up_heap(NewGC *gc) next = work->next; if(prev) prev->next = next; else gc->med_pages[i] = next; if(next) work->next->prev = prev; - GCVERBOSEPAGE("Cleaning up MED NO MARKEDON", work); + GCVERBOSEPAGE(gc, "Cleaning up MED NO MARKEDON", work); gen1_free_mpage(pagemap, work); } else { /* not touched during minor gc */ @@ -3542,63 +3710,111 @@ static void clean_up_heap(NewGC *gc) #ifdef MZ_USE_PLACES static void unprotect_old_pages(NewGC *gc) { - Page_Range *protect_range = gc->protect_range; + MMU *mmu = gc->mmu; mpage *page; int i; for(i = 0; i < PAGE_TYPES; i++) { - if(i != PAGE_ATOMIC) - for(page = gc->gen1_pages[i]; page; page = page->next) + if(i != PAGE_ATOMIC) { + for(page = gc->gen1_pages[i]; page; page = page->next) { if(page->page_type != PAGE_ATOMIC) { - if (page->mprotected) { - page->mprotected = 0; - add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 1); - } + page->mprotected = 0; + mmu_queue_write_unprotect_range(mmu, page->addr, real_page_size(page), page_mmu_type(page), &page->mmu_src_block); } - } - - for (i = 0; i < NUM_MED_PAGE_SIZES; i++) { - for (page = gc->med_pages[i]; page; page = page->next) { - if (page->mprotected) { - page->mprotected = 0; - add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 1); } } } - flush_protect_page_ranges(protect_range, 0); + for (i = 0; i < NUM_MED_PAGE_SIZES; i++) { + for (page = gc->med_pages[i]; page; page = page->next) { + page->mprotected = 0; + mmu_queue_write_unprotect_range(mmu, page->addr, real_page_size(page), page_mmu_type(page), &page->mmu_src_block); + } + } + + mmu_flush_write_unprotect_ranges(mmu); } #endif static void protect_old_pages(NewGC *gc) { - Page_Range *protect_range = gc->protect_range; + MMU *mmu = gc->mmu; mpage *page; int i; +#ifdef GC_MP_CNT + mp_gc_protect_cnt = mp_pr_add_cnt; +#endif for(i = 0; i < PAGE_TYPES; i++) { - if(i != PAGE_ATOMIC) - for(page = gc->gen1_pages[i]; page; page = page->next) + if(i != PAGE_ATOMIC) { + for(page = gc->gen1_pages[i]; page; page = page->next) { if(page->page_type != PAGE_ATOMIC) { - if (!page->mprotected) { - page->mprotected 
= 1; - add_protect_page_range(protect_range, page->addr, page->size, APAGE_SIZE, 0); - } + page->back_pointers = 0; + page->mprotected = 1; + mmu_queue_write_protect_range(mmu, page->addr, real_page_size(page), page_mmu_type(page), &page->mmu_src_block); } - } - - for (i = 0; i < NUM_MED_PAGE_SIZES; i++) { - for (page = gc->med_pages[i]; page; page = page->next) { - if (!page->mprotected) { - page->mprotected = 1; - add_protect_page_range(protect_range, page->addr, APAGE_SIZE, APAGE_SIZE, 0); } } } - flush_protect_page_ranges(protect_range, 0); + for (i = 0; i < NUM_MED_PAGE_SIZES; i++) { + for (page = gc->med_pages[i]; page; page = page->next) { + page->back_pointers = 0; + page->mprotected = 1; + mmu_queue_write_protect_range(mmu, page->addr, APAGE_SIZE, page_mmu_type(page), &page->mmu_src_block); + } + } + + mmu_flush_write_protect_ranges(mmu); + +#ifdef GC_MP_CNT + mp_gc_protect_cnt = mp_pr_add_cnt - mp_gc_protect_cnt; +#endif } +#ifdef GC_MP_CNT +void print_debug_stats(NewGC *gc) { + char* color; + if (!(mp_gcs_cnt % 30)) { + printf("GCINSTANC WRITE_BA GC_MARK2 DURINGGC PR_ADD__ PR_PROT_ PR_FFLUS UNPROTEC REPROTEC MMUALLOCATED COMPACTED_ COMPACTLOC BC_FREED AC_FREED\n"); + } + mp_gc_protect_cnt = mp_pr_add_cnt - mp_gc_protect_cnt; + mp_gcs_cnt ++; + + if (gc->gc_full) { + if (gc == MASTERGC) { + if (gc->num_major_collects % 2) color = "\033[0;32m"; + else color = "\033[1;32m"; + } + else { + if (gc->num_major_collects % 2) color = "\033[0;31m"; + else color = "\033[1;31m"; + } + } + else + color = "\033\[0;37m"; + printf("%s%p %08i %08i %08i %08i %08i %08i %08i %08i %012li %010li %010li %08li %08li%s\n", + color, + gc, + mp_write_barrier_cnt, + mp_mark_cnt, + mp_alloc_med_big_cnt, + mp_pr_add_cnt, + mp_pr_call_cnt, + mp_pr_ff_cnt, + mp_gc_unprotect_cnt, + mp_gc_protect_cnt, + mmu_memory_allocated(gc->mmu), + mp_compact_cnt, + mp_compact_cnt - mp_prev_compact_cnt, + mp_bc_freed, + mp_ac_freed, + "\033\[0;37m"); + mp_bc_freed = 0; + mp_ac_freed = 0; +} +#endif + #if 0 extern double scheme_get_inexact_milliseconds(void); # define TIME_DECLS() double start, task_start @@ -3633,6 +3849,8 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master) TIME_DECLS(); + dump_page_map(gc, "pre"); + /* determine if this should be a full collection or not */ gc->gc_full = force_full || !gc->generations_available || (gc->since_last_full > 100) || (gc->memory_in_use > (2 * gc->last_full_mem_use)); @@ -3649,9 +3867,9 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master) gc->full_needed_for_finalization= 0; gc->gc_full = 1; } -#ifdef DEBUG_GC_PAGES +#ifdef GC_DEBUG_PAGES if (gc->gc_full == 1) { - GCVERBOSEprintf("GC_FULL gc: %p MASTER: %p\n", gc, MASTERGC); + GCVERBOSEprintf(gc, "GC_FULL gc: %p MASTER: %p\n", gc, MASTERGC); } #endif gc->number_of_gc_runs++; @@ -3742,7 +3960,7 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master) if(gc->gc_full) #ifdef MZ_USE_PLACES - if (!MASTERGC) + if (premaster_or_place_gc(gc) || switching_master) #endif do_heap_compact(gc); TIME_STEP("compacted"); @@ -3791,12 +4009,16 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master) #else protect_old_pages(gc); #endif -} + } TIME_STEP("protect"); if (gc->gc_full) - vm_flush_freed_pages(gc->vm); + mmu_flush_freed_pages(gc->mmu); reset_finalizer_tree(gc); +#ifdef GC_MP_CNT + print_debug_stats(gc); +#endif + TIME_STEP("reset"); /* now we do want the allocator freaking if we go over half */ @@ -3805,8 +4027,8 @@ static void 
garbage_collect(NewGC *gc, int force_full, int switching_master) gc->no_further_modifications = 0; /* If we have too many idle pages, flush: */ - if (vm_memory_allocated(gc->vm) > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) { - vm_flush_freed_pages(gc->vm); + if (mmu_memory_allocated(gc->mmu) > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) { + mmu_flush_freed_pages(gc->mmu); } /* update some statistics */ @@ -3833,6 +4055,8 @@ static void garbage_collect(NewGC *gc, int force_full, int switching_master) TIME_DONE(); + dump_page_map(gc, "post"); + if (!gc->run_queue) next_gc_full = 0; @@ -3945,20 +4169,20 @@ void GC_free_all(void) next = work->next; if (work->mprotected) - vm_protect_pages(work->addr, real_page_size(work), 1); - GCVERBOSEPAGE("Cleaning up GC DYING", work); + { + mmu_write_unprotect_page(gc->mmu, work->addr, real_page_size(work)); + } + GCVERBOSEPAGE(gc, "Cleaning up GC DYING", work); gen1_free_mpage(pagemap, work); } } free(gc->mark_table); free(gc->fixup_table); - free_page_maps(gc->page_maps); - free(gc->protect_range); - vm_flush_freed_pages(gc->vm); - vm_free(gc->vm); + mmu_flush_freed_pages(gc->mmu); + mmu_free(gc->mmu); free(gc); } diff --git a/src/racket/gc2/newgc.h b/src/racket/gc2/newgc.h index 93f59bc834..ffe30ff9d5 100644 --- a/src/racket/gc2/newgc.h +++ b/src/racket/gc2/newgc.h @@ -1,21 +1,32 @@ #include "commongc_internal.h" #include "gc2_obj.h" +#if defined(MZ_USE_PLACES) +/* +# define GC_DEBUG_PAGES +# define MASTER_ALLOC_DEBUG +# define KILLING_DEBUG +*/ +#endif + + typedef struct mpage { struct mpage *next; struct mpage *prev; void *addr; unsigned long previous_size; /* for med page, place to search for available block; for jit nursery, allocated size */ unsigned long size; /* big page size, med page element size, or nursery starting point */ - unsigned char generation; /* + unsigned char generation :1; unsigned char back_pointers :1; - unsigned char big_page :2; + unsigned char size_cless :2; unsigned char page_type :3; unsigned char marked_on :1; unsigned char has_new :1; unsigned char mprotected :1; + unsigned char added :1; */ + unsigned char generation ; unsigned char back_pointers ; unsigned char size_class ; /* 0 => small; 1 => med; 2 => big; 3 => big marked */ unsigned char page_type ; @@ -24,7 +35,10 @@ typedef struct mpage { unsigned char mprotected ; unsigned char added ; unsigned short live_size; +#ifdef MZ_GC_BACKTRACE void **backtrace; +#endif + void *mmu_src_block; } mpage; typedef struct Gen0 { @@ -85,6 +99,13 @@ typedef struct Page_Range { unsigned long range_alloc_used; } Page_Range; +typedef struct { + char *start; + long len; + short age; + short zeroed; +} AllocCacheBlock; + #ifdef MZ_USE_PLACES typedef struct NewGCMasterInfo { unsigned long size; @@ -111,7 +132,6 @@ typedef struct NewGC { PageMap page_maps; /* All non-gen0 pages are held in the following structure. 
*/ struct mpage *gen1_pages[PAGE_TYPES]; - Page_Range *protect_range; struct mpage *med_pages[NUM_MED_PAGE_SIZES]; struct mpage *med_freelist_pages[NUM_MED_PAGE_SIZES]; @@ -209,6 +229,10 @@ typedef struct NewGC { GC_Weak_Box *weak_boxes; GC_Ephemeron *ephemerons; int num_last_seen_ephemerons; - struct VM *vm; + struct MMU *mmu; + +#if defined(GC_DEBUG_PAGES) + FILE *GCVERBOSEFH; +#endif } NewGC; diff --git a/src/racket/gc2/page_range.c b/src/racket/gc2/page_range.c index 7f8fd9177b..739994a7a0 100644 --- a/src/racket/gc2/page_range.c +++ b/src/racket/gc2/page_range.c @@ -1,11 +1,65 @@ - -/* - Provides: - initialize_page_ranges - flush_page_ranges - add_page_range +/* + Provides: + page_range_initialize + page_range_add + page_range_flush + Requires: + os_protect_pages */ +#ifdef _WIN32 + +/* VirtualProtect can be used only on pages allocated at the same + time, so we can't collapse ranges. */ + +# define initialize_protect_page_ranges(pr, b, s) /* */ +# define add_protect_page_range(pr, s, l, a, w) vm_protect_pages(s, l, w) +# define flush_protect_page_ranges(pr, w) /* */ + +#else + +static void page_range_compact(Page_Range *pr); +static void page_range_reset(Page_Range *pr); +static void page_range_flush(Page_Range *pr, int writeable); +static int page_range_add_worker(Page_Range *pr, void *_start, unsigned long len); + + +static Page_Range *page_range_create() +{ + Page_Range *pr = ofm_malloc_zero(sizeof(Page_Range)); + pr->range_root = NULL; + pr->range_start = NULL; + pr->range_alloc_block = ofm_malloc(APAGE_SIZE); + pr->range_alloc_size = APAGE_SIZE; + pr->range_alloc_used = 0; + return pr; +} + +static void page_range_add(Page_Range *pr, void *_start, unsigned long len, int writeable) +{ + GC_MP_CNT_INC(mp_pr_add_cnt); + if (!page_range_add_worker(pr, _start, len)) { + GC_MP_CNT_INC(mp_pr_ff_cnt); + page_range_flush(pr, writeable); + page_range_add_worker(pr, _start, len); + } +} + + +static void page_range_flush(Page_Range *pr, int writeable) +{ + Range *work; + + page_range_compact(pr); + + for (work = pr->range_start; work; work = work->next) { + os_protect_pages((void *)work->start, work->len, writeable); + GC_MP_CNT_INC(mp_pr_call_cnt); + } + + page_range_reset(pr); +} + #define Tree Range #define Splay_Item(t) (t)->start #define Set_Splay_Item(t, v) (t)->start = (v) @@ -20,16 +74,7 @@ #undef Splay_Item #undef Set_Splay_Item -static void initialize_page_ranges(Page_Range *pr, void *block, unsigned long size) -{ - pr->range_root = NULL; - pr->range_start = NULL; - pr->range_alloc_block = block; - pr->range_alloc_size = size; - pr->range_alloc_used = 0; -} - -static void compact_page_ranges(Page_Range *pr) +static void page_range_compact(Page_Range *pr) { Range *work, *next; unsigned long start, len; @@ -52,7 +97,7 @@ static void compact_page_ranges(Page_Range *pr) } } -static void reset_page_ranges(Page_Range *pr) +static void page_range_reset(Page_Range *pr) { pr->range_alloc_used = 0; pr->range_root = NULL; @@ -77,14 +122,11 @@ static int try_extend(Range *r, unsigned long start, unsigned long len) return 0; } -static int add_page_range(Page_Range *pr, void *_start, unsigned long len, unsigned long alignment) +static int page_range_add_worker(Page_Range *pr, void *_start, unsigned long len) { unsigned long start = (unsigned long)_start; Range *r, *range_root = pr->range_root; - len += (alignment - 1); - len -= (len & (alignment - 1)); - range_root = range_splay(start, range_root); if (range_root) { @@ -130,4 +172,4 @@ static int add_page_range(Page_Range *pr, void *_start, 
unsigned long len, unsig return 1; } } - +#endif diff --git a/src/racket/gc2/protect_range.c b/src/racket/gc2/protect_range.c deleted file mode 100644 index fd29daa35b..0000000000 --- a/src/racket/gc2/protect_range.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - Provides: - Requires: - [page_range.c exports] - [page allocator] -*/ - -#ifdef _WIN32 - -/* VirtualProtect can be used only on pages allocated at the same - time, so we can't collapse ranges. */ - -# define initialize_protect_page_ranges(pr, b, s) /* */ -# define add_protect_page_range(pr, s, l, a, w) vm_protect_pages(s, l, w) -# define flush_protect_page_ranges(pr, w) /* */ - -#else - -static void initialize_protect_page_ranges(Page_Range *protect_range, void *block, unsigned long size) -{ - initialize_page_ranges(protect_range, block, size); -} - -static void flush_protect_page_ranges(Page_Range *protect_range, int writeable) -{ - Range *work; - - compact_page_ranges(protect_range); - - for (work = protect_range->range_start; work; work = work->next) { - vm_protect_pages((void *)work->start, work->len, writeable); - } - - reset_page_ranges(protect_range); -} - -static void add_protect_page_range(Page_Range *protect_range, void *_start, unsigned long len, unsigned long alignment, int writeable) -{ - if (!add_page_range(protect_range, _start, len, alignment)) { - flush_protect_page_ranges(protect_range, writeable); - add_page_range(protect_range, _start, len, alignment); - } -} - -#endif diff --git a/src/racket/gc2/vm.c b/src/racket/gc2/vm.c index 74a675d28d..a13eed6eed 100644 --- a/src/racket/gc2/vm.c +++ b/src/racket/gc2/vm.c @@ -1,66 +1,236 @@ - /******************************************************************************/ /* OS-specific low-level allocator */ /******************************************************************************/ +/* TODO +OSKIT and WINDOWS hard code os_pagesize to APAGE_SIZE +*/ + #ifndef GCPRINT # define GCPRINT fprintf # define GCOUTF stderr #endif -static inline size_t vm_round_up_to_page_size(size_t len, size_t page_size) { - len += (page_size -1) - (len & (page_size - 1)); - return len; +enum { + MMU_WRITE_PROTECTED = 0, + MMU_WRITABLE = 1, }; -#if !( defined(_WIN32) || defined(OSKIT) ) -typedef struct { - char *start; - long len; - short age; - short zeroed; -} FreeBlock; +#ifdef MZ_USE_PLACES +#define USE_BLOCK_CACHE #endif -typedef struct VM { -#if !( defined(_WIN32) || defined(OSKIT) ) - FreeBlock *freeblocks; +struct BlockCache; +typedef struct MMU { +#ifdef USE_BLOCK_CACHE + struct BlockCache *block_cache; +#elif !( defined(_WIN32) || defined(OSKIT) ) + AllocCacheBlock *alloc_caches[2]; + Page_Range *page_range; #endif - size_t memory_allocated; -} VM; + long memory_allocated; + size_t os_pagesize; + NewGC *gc; +} MMU; -static VM *vm_create() { - VM *vm = ofm_malloc(sizeof(VM)); - memset(vm, 0, sizeof(VM)); #if !( defined(_WIN32) || defined(OSKIT) ) - #define BLOCKFREE_CACHE_SIZE 96 - vm->freeblocks = ofm_malloc(sizeof(FreeBlock) * BLOCKFREE_CACHE_SIZE); - memset(vm->freeblocks, 0, sizeof(FreeBlock) * BLOCKFREE_CACHE_SIZE); +static void *os_alloc_pages(size_t len); +static void os_free_pages(void *p, size_t len); +static void os_protect_pages(void *p, size_t len, int writable); +#else +static void *os_alloc_pages(MMU *mmu, size_t len, size_t alignment, int dirty); +static void os_free_pages(MMU *mmu, void *p, size_t len); +static void os_protect_pages(void *p, size_t len, int writable); +static void os_flush_freed_pages(MMU *mmu); #endif - return vm; + +/* provides */ +static inline size_t 
mmu_get_os_page_size(MMU *mmu) { return mmu->os_pagesize; } +static size_t mmu_memory_allocated(MMU *mmu); + +static inline size_t align_up(const size_t len, const size_t boundary) { + const size_t modulo = (len & (boundary - 1)); + if (modulo) + return len + (boundary - modulo); + return len; +} +static inline void* align_up_ptr(const void *p, const size_t boundary) { + return (void*) align_up((size_t) p, boundary); } -static void vm_free(VM *vm) { +static inline size_t align_up_to_gc_pagesize(size_t len) { + const size_t page_size = APAGE_SIZE; + return align_up(len, page_size); +} + +static inline size_t mmu_round_up_to_os_page_size(MMU *mmu, size_t len) { + const size_t page_size = mmu->os_pagesize; + return align_up(len, page_size); +} + +static inline void mmu_assert_os_page_aligned(MMU *mmu, size_t p) { + if (p & (mmu->os_pagesize - 1)) { + printf("address or size is not OS PAGE ALIGNED!!!!"); + abort(); + } +} + +#ifdef USE_BLOCK_CACHE +#include "block_cache.c" +#include "alloc_cache.c" +#include "page_range.c" +#include +#elif !( defined(_WIN32) || defined(OSKIT) ) +#include "alloc_cache.c" +#include "page_range.c" +#include +#endif + + +static MMU *mmu_create(NewGC *gc) { + MMU *mmu = ofm_malloc_zero(sizeof(MMU)); + mmu->gc = gc; + #if !( defined(_WIN32) || defined(OSKIT) ) - free(vm->freeblocks); +#ifdef USE_BLOCK_CACHE + mmu->block_cache = block_cache_create(mmu); +#else + /* initialization of page_range */ + mmu->page_range = page_range_create(); + + /* initialization of alloc_cache */ + mmu->alloc_caches[0] = alloc_cache_create(); + mmu->alloc_caches[1] = alloc_cache_create(); #endif - free(vm); + + mmu->os_pagesize = getpagesize(); +#else + mmu->os_pagesize = APAGE_SIZE; +#endif + + return mmu; } -static size_t vm_memory_allocated(VM *vm) { - return vm->memory_allocated; +static void mmu_free(MMU *mmu) { +#ifdef USE_BLOCK_CACHE + block_cache_free(mmu->block_cache); +#elif !( defined(_WIN32) || defined(OSKIT) ) + free(mmu->alloc_caches[0]); + free(mmu->alloc_caches[1]); +#endif + free(mmu); } -static size_t vm_memory_allocated_inc(VM *vm, size_t len) { - vm->memory_allocated += len; - return vm->memory_allocated; +static void *mmu_alloc_page(MMU* mmu, size_t len, size_t alignment, int dirty, int type, int expect_mprotect, void **src_block) { + mmu_assert_os_page_aligned(mmu, len); +#ifdef USE_BLOCK_CACHE + return block_cache_alloc_page(mmu->block_cache, len, alignment, dirty, type, expect_mprotect, src_block, &mmu->memory_allocated); +#elif !( defined(_WIN32) || defined(OSKIT) ) + //len = mmu_round_up_to_os_page_size(mmu, len); + { + AllocCacheBlock *alloc_cache = mmu->alloc_caches[!!expect_mprotect]; + return alloc_cache_alloc_page(alloc_cache, len, alignment, dirty, &mmu->memory_allocated); + } +#else + return os_alloc_pages(mmu, len, alignment, dirty); +#endif } -static size_t vm_memory_allocated_dec(VM *vm, size_t len) { - vm->memory_allocated -= len; - return vm->memory_allocated; +static void mmu_free_page(MMU* mmu, void *p, size_t len, int type, int expect_mprotect, void **src_block) { + mmu_assert_os_page_aligned(mmu, (size_t)p); + mmu_assert_os_page_aligned(mmu, len); +#ifdef USE_BLOCK_CACHE + mmu->memory_allocated += block_cache_free_page(mmu->block_cache, p, len, type, expect_mprotect, src_block); +#elif !( defined(_WIN32) || defined(OSKIT) ) + //len = mmu_round_up_to_os_page_size(mmu, len); + { + AllocCacheBlock *alloc_cache = mmu->alloc_caches[!!expect_mprotect]; + mmu->memory_allocated += alloc_cache_free_page(alloc_cache, p, len, MMU_DIRTY); + } +#else + 
os_free_pages(mmu, p, len);
+#endif
 }
 
+static void mmu_flush_freed_pages(MMU *mmu) {
+#ifdef USE_BLOCK_CACHE
+  mmu->memory_allocated += block_cache_flush_freed_pages(mmu->block_cache);
+#elif !( defined(_WIN32) || defined(OSKIT) )
+  mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[0]);
+  mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[1]);
+#elif defined(_WIN32)
+  os_flush_freed_pages(mmu);
+#endif
+}
+
+static void mmu_prep_for_compaction(MMU *mmu) {
+#ifdef USE_BLOCK_CACHE
+  block_cache_prep_for_compaction(mmu->block_cache);
+#endif
+}
+
+static int mmu_should_compact_page(MMU *mmu, void **src_block) {
+#ifdef USE_BLOCK_CACHE
+  return block_cache_compact(src_block);
+#endif
+  return 0;
+}
+
+static void mmu_write_unprotect_page(MMU *mmu, void *p, size_t len) {
+  mmu_assert_os_page_aligned(mmu, (size_t)p);
+  mmu_assert_os_page_aligned(mmu, len);
+  os_protect_pages(p, len, 1);
+}
+
+static void mmu_queue_protect_range(MMU *mmu, void *p, size_t len, int type, int writeable, void **src_block) {
+  mmu_assert_os_page_aligned(mmu, (size_t)p);
+  mmu_assert_os_page_aligned(mmu, len);
+#ifdef USE_BLOCK_CACHE
+  block_cache_queue_protect_range(mmu->block_cache, p, len, type, writeable, src_block);
+#elif !( defined(_WIN32) || defined(OSKIT) )
+  page_range_add(mmu->page_range, p, len, writeable);
+#else
+  os_protect_pages(p, len, writeable);
+#endif
+}
+
+static void mmu_queue_write_protect_range(MMU *mmu, void *p, size_t len, int type, void **src_block) {
+  mmu_queue_protect_range(mmu, p, len, type, MMU_WRITE_PROTECTED, src_block);
+}
+
+static void mmu_queue_write_unprotect_range(MMU *mmu, void *p, size_t len, int type, void **src_block) {
+  mmu_queue_protect_range(mmu, p, len, type, MMU_WRITABLE, src_block);
+}
+
+static void mmu_flush_write_protect_ranges(MMU *mmu) {
+#ifdef USE_BLOCK_CACHE
+  block_cache_flush_protect_ranges(mmu->block_cache, MMU_WRITE_PROTECTED);
+#elif !( defined(_WIN32) || defined(OSKIT) )
+  page_range_flush(mmu->page_range, MMU_WRITE_PROTECTED);
+#endif
+}
+
+static void mmu_flush_write_unprotect_ranges(MMU *mmu) {
+#ifdef USE_BLOCK_CACHE
+  block_cache_flush_protect_ranges(mmu->block_cache, MMU_WRITABLE);
+#elif !( defined(_WIN32) || defined(OSKIT) )
+  page_range_flush(mmu->page_range, MMU_WRITABLE);
+#endif
+}
+
+static size_t mmu_memory_allocated(MMU *mmu) {
+  return mmu->memory_allocated;
+}
+
+#if ( defined(_WIN32) || defined(OSKIT) )
+static void mmu_memory_allocated_inc(MMU *mmu, long amt) {
+  mmu->memory_allocated += amt;
+}
+static void mmu_memory_allocated_dec(MMU *mmu, long amt) {
+  mmu->memory_allocated -= amt;
+}
+#endif
 
 #if _WIN32 /* Windows */
 # include "vm_win.c"
@@ -68,6 +238,6 @@ static size_t vm_memory_allocated_dec(VM *vm, size_t len) {
 # include "vm_osk.c"
 #elif defined(OS_X) /* OS X */
 # include "vm_osx.c"
-#else /* Default: mmap */
+#else /* Default: mmap, linux, unix, freebsd*/
 # include "vm_mmap.c"
 #endif
diff --git a/src/racket/gc2/vm_memalign.c b/src/racket/gc2/vm_memalign.c
index 5322329b52..c718e01f95 100644
--- a/src/racket/gc2/vm_memalign.c
+++ b/src/racket/gc2/vm_memalign.c
@@ -14,7 +14,7 @@
 
 static int page_size; /* OS page size */
 
-static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
+static void *os_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
 {
   void *r;
 
@@ -38,17 +38,13 @@ static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
   return r;
 }
 
-static void vm_free_pages(VM 
*vm, void *p, size_t len) +static void os_free_pages(VM *vm, void *p, size_t len) { vm_memory_allocated_dec(vm, len); free(p); } -static void vm_flush_freed_pages(VM *vm) -{ -} - -static void vm_protect_pages(void *p, size_t len, int writeable) +static void os_protect_pages(void *p, size_t len, int writeable) { if (len & (page_size - 1)) { len += page_size - (len & (page_size - 1)); diff --git a/src/racket/gc2/vm_mmap.c b/src/racket/gc2/vm_mmap.c index 3d71e471cd..718e4eea36 100644 --- a/src/racket/gc2/vm_mmap.c +++ b/src/racket/gc2/vm_mmap.c @@ -2,7 +2,7 @@ /* static void os_vm_free_pages(void *p, size_t len); static void *os_vm_alloc_pages(size_t len); -static void vm_protect_pages(void *p, size_t len, int writeable); +static void os_protect_pages(void *p, size_t len, int writeable); */ /* Requires: */ /* Optional: @@ -15,16 +15,14 @@ static void vm_protect_pages(void *p, size_t len, int writeable); #include #include -static long page_size; - -static void os_vm_free_pages(void *p, size_t len) +static void os_free_pages(void *p, size_t len) { if (munmap(p, len)) { GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)p, (long)len, errno); } } -static void *os_vm_alloc_pages(size_t len) +static void *os_alloc_pages(size_t len) { void *r; @@ -38,10 +36,6 @@ static void *os_vm_alloc_pages(size_t len) } #endif - /* Round up to nearest page: */ - if (len & (page_size - 1)) - len += page_size - (len & (page_size - 1)); - #ifdef MAP_ANON r = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); #else @@ -55,14 +49,9 @@ static void *os_vm_alloc_pages(size_t len) } -static void vm_protect_pages(void *p, size_t len, int writeable) +static void os_protect_pages(void *p, size_t len, int writeable) { - if (len & (page_size - 1)) { - len += page_size - (len & (page_size - 1)); - } - mprotect(p, len, (writeable ? 
(PROT_READ | PROT_WRITE) : PROT_READ)); } -#include "alloc_cache.c" #include "rlimit_heapsize.c" diff --git a/src/racket/gc2/vm_osk.c b/src/racket/gc2/vm_osk.c index 8fc5eb45ab..34e94c5e5f 100644 --- a/src/racket/gc2/vm_osk.c +++ b/src/racket/gc2/vm_osk.c @@ -9,7 +9,7 @@ #include -inline static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok) +inline static void *os_malloc_pages(MMU *mmu, size_t len, size_t alignment, int dirty_ok) { void *p; @@ -18,21 +18,17 @@ inline static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int di if (!dirty_ok) memset(p, 0, len); - vm_memory_allocated_inc(vm, len); + mmu_memory_allocated_inc(mmu, len); return p; } -static void vm_free_pages(VM *vm, void *p, size_t len) +static void os_free_pages(MMU *mmu, void *p, size_t len) { - vm_memory_allocated_dec(vm, len); + mmu_memory_allocated_dec(mmu, len); sfree(p, len); } -static void vm_flush_freed_pages(VM *vm) -{ -} - #ifndef DONT_NEED_MAX_HEAP_SIZE static unsigned long determine_max_heap_size(void) { diff --git a/src/racket/gc2/vm_osx.c b/src/racket/gc2/vm_osx.c index d569bf00e0..c715e2879f 100644 --- a/src/racket/gc2/vm_osx.c +++ b/src/racket/gc2/vm_osx.c @@ -147,7 +147,7 @@ static mach_port_t task_self = 0; static mach_port_t exc_port = 0; /* the VM subsystem as defined by the GC files */ -static void *os_vm_alloc_pages(size_t len) +static void *os_alloc_pages(size_t len) { kern_return_t retval; void *r; @@ -167,7 +167,7 @@ static void *os_vm_alloc_pages(size_t len) return r; } -static void os_vm_free_pages(void *p, size_t len) +static void os_free_pages(void *p, size_t len) { kern_return_t retval; @@ -178,7 +178,7 @@ static void os_vm_free_pages(void *p, size_t len) } } -static void vm_protect_pages(void *p, size_t len, int writeable) +static void os_protect_pages(void *p, size_t len, int writeable) { kern_return_t retval; @@ -195,8 +195,6 @@ static void vm_protect_pages(void *p, size_t len, int writeable) } } -#include "alloc_cache.c" - #ifndef DONT_NEED_MAX_HEAP_SIZE static unsigned long determine_max_heap_size() diff --git a/src/racket/gc2/vm_win.c b/src/racket/gc2/vm_win.c index 42ca809054..e0a3fce118 100644 --- a/src/racket/gc2/vm_win.c +++ b/src/racket/gc2/vm_win.c @@ -20,7 +20,7 @@ typedef struct { static alloc_cache_entry cache[2][CACHE_SLOTS]; #endif -static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok) +static void *os_alloc_pages(MMU *mmu, size_t len, size_t alignment, int dirty_ok) { #if CACHE_SLOTS { @@ -42,7 +42,7 @@ static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok) } #endif - vm_memory_allocated_inc(vm, len); + mmu_memory_allocated_inc(mmu, len); /* VirtualAlloc MEM_COMMIT always zeros memory */ return (void *)VirtualAlloc(NULL, len, @@ -50,7 +50,7 @@ static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok) PAGE_READWRITE); } -static void vm_free_pages(VM *vm, void *p, size_t len) +static void os_free_pages(MMU *mmu, void *p, size_t len) { #if CACHE_SLOTS @@ -69,11 +69,11 @@ static void vm_free_pages(VM *vm, void *p, size_t len) } #endif - vm_memory_allocated_dec(vm, len); + mmu_memory_allocated_dec(mmu, len); VirtualFree(p, 0, MEM_RELEASE); } -static void vm_flush_freed_pages(VM *vm) +static void os_flush_freed_pages(MMU *mmu) { #if CACHE_SLOTS int i; @@ -83,7 +83,7 @@ static void vm_flush_freed_pages(VM *vm) if (cache[1][i].len) { for (p = cache[1][i].page; p; p = next) { next = *(void **)p; - vm_memory_allocated_dec(vm, cache[i].len); + mmu_memory_allocated_dec(mmu, 
cache[i].len); VirtualFree(p, 0, MEM_RELEASE); } } @@ -95,7 +95,7 @@ static void vm_flush_freed_pages(VM *vm) #endif } -static void vm_protect_pages(void *p, size_t len, int writeable) +static void os_protect_pages(void *p, size_t len, int writeable) { DWORD old; VirtualProtect(p, len, (writeable ? PAGE_READWRITE : PAGE_READONLY), &old); diff --git a/src/racket/src/places.c b/src/racket/src/places.c index 9a3b59924d..2f19367444 100644 --- a/src/racket/src/places.c +++ b/src/racket/src/places.c @@ -515,7 +515,7 @@ Scheme_Object *scheme_places_deep_copy_worker(Scheme_Object *so, Scheme_Hash_Tab car = scheme_places_deep_copy_worker(SCHEME_CAR(so), ht); cdr = scheme_places_deep_copy_worker(SCHEME_CDR(so), ht); pair = scheme_make_pair(car, cdr); - return pair; + new_so = pair; } break; case scheme_vector_type: