diff --git a/src/racket/gc2/alloc_cache.c b/src/racket/gc2/alloc_cache.c
index 4a3ee07d80..a15c107bde 100644
--- a/src/racket/gc2/alloc_cache.c
+++ b/src/racket/gc2/alloc_cache.c
@@ -1,6 +1,6 @@
 /* Provides:
-      static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty)
+      static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty, int originated_here)
       static ssize_t void alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
       static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff)
    Requires (defined earlier):
@@ -112,7 +112,7 @@ inline static void *alloc_cache_find_pages(AllocCacheBlock *blockfree, size_t le
   return NULL;
 }
 
-static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty)
+static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty, int originated_here)
 {
   int i;
 
@@ -124,14 +124,14 @@ static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t
       blockfree[i].len += len;
       if (dirty)
         blockfree[i].zeroed = 0;
-      return 0;
+      return (originated_here ? 0 : len);
     }
     if (p + len == blockfree[i].start) {
       blockfree[i].start = p;
       blockfree[i].len += len;
       if (dirty)
         blockfree[i].zeroed = 0;
-      return 0;
+      return (originated_here ? 0 : len);
     }
   }
 
@@ -141,7 +141,7 @@ static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t
       blockfree[i].len = len;
       blockfree[i].age = 0;
       blockfree[i].zeroed = !dirty;
-      return 0;
+      return (originated_here ? 0 : len);
     }
   }
 
@@ -149,7 +149,7 @@ static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t
   alloc_cache_collapse_pages(blockfree);
 
   os_free_pages(p, len);
-  return -len;
+  return (originated_here ? -len : 0);
 }
 
 static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
@@ -226,7 +226,7 @@ static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, siz
       /* Instead of actually unmapping, put it in the cache, and there's
          a good chance we can use it next time: */
       (*size_diff) += extra;
-      (*size_diff) += alloc_cache_free_page(blockfree, real_r + len, extra, 1);
+      (*size_diff) += alloc_cache_free_page(blockfree, real_r + len, extra, 1, 1);
     } else {
       os_free_pages(real_r + len, extra - pre_extra);
     }
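The accounting convention at work above: alloc_cache_free_page returns a size
delta that its caller folds into a memory_allocated total. A page that
originated here was already counted, so caching it is a net 0 and handing it
back to the OS is -len; a page adopted from another place (a message page)
was never counted by this allocator, so caching it adds +len and unmapping it
is a net 0. The standalone sketch below restates that decision table;
free_page_delta is a hypothetical helper, not part of the patch:

    #include <stdio.h>
    #include <sys/types.h> /* ssize_t */

    /* Hypothetical restatement of alloc_cache_free_page's accounting:
       the return value is the delta to apply to memory_allocated. */
    static ssize_t free_page_delta(size_t len, int kept_in_cache, int originated_here)
    {
      if (kept_in_cache)
        return originated_here ? 0 : (ssize_t)len; /* adopted page becomes ours */
      return originated_here ? -(ssize_t)len : 0;  /* only our own pages shrink the total */
    }

    int main(void)
    {
      printf("%zd\n", free_page_delta(4096, 1, 1)); /* local page, cached:      0 */
      printf("%zd\n", free_page_delta(4096, 1, 0)); /* adopted page, cached:   +4096 */
      printf("%zd\n", free_page_delta(4096, 0, 1)); /* local page, unmapped:   -4096 */
      printf("%zd\n", free_page_delta(4096, 0, 0)); /* adopted page, unmapped:  0 */
      return 0;
    }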
diff --git a/src/racket/gc2/block_cache.c b/src/racket/gc2/block_cache.c
index 9f83ac6b0b..d3b30be95b 100644
--- a/src/racket/gc2/block_cache.c
+++ b/src/racket/gc2/block_cache.c
@@ -12,7 +12,7 @@ static void os_protect_pages(void *p, size_t len, int writable);
 struct block_desc;
 static AllocCacheBlock *alloc_cache_create();
 static ssize_t alloc_cache_free(AllocCacheBlock *);
-static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty);
+static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty, int originated_here);
 static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree);
 static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
 
@@ -222,7 +222,8 @@ static int find_addr_in_bd(GCList *head, void *p, char* msg) {
 }
 #endif
 
-static ssize_t block_cache_free_page(BlockCache* bc, void *p, size_t len, int type, int expect_mprotect, void **src_block) {
+static ssize_t block_cache_free_page(BlockCache* bc, void *p, size_t len, int type, int expect_mprotect, void **src_block,
+                                     int originated_here) {
   switch(type) {
     case MMU_SMALL_GEN1:
       {
@@ -252,7 +253,7 @@ static ssize_t block_cache_free_page(BlockCache* bc, void *p, size_t len, int ty
         printf("FREE PAGE %i %p %p-%p %03i %03i %04i %04i : %03i %03i %03i %03i %09i\n", expect_mprotect, bg, p, p + APAGE_SIZE, afu, afr, nafu, nafr, afub, afrb, nafub, nafrb, mmu_memory_allocated(bc->mmu));
       }
 #endif
-      return 0;
+      return (originated_here ? 0 : len);
     }
     break;
     default:
@@ -263,7 +264,7 @@ static ssize_t block_cache_free_page(BlockCache* bc, void *p, size_t len, int ty
              find_addr_in_bd(&bc->non_atomic.free, p, "non_atomic freeblock")));
       assert(*src_block == (char*)~0x0);
 #endif
-      return alloc_cache_free_page(bc->bigBlockCache, p, len, MMU_DIRTY);
+      return alloc_cache_free_page(bc->bigBlockCache, p, len, MMU_DIRTY, originated_here);
       break;
   }
 }
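In the MMU_SMALL_GEN1 branch above nothing is unmapped: the page is only
returned to its block's free list, so the one accounting consequence is that
an adopted page becomes this GC's responsibility (+len). Big pages fall
through to alloc_cache_free_page, which applies the same convention. A
minimal sketch of the small-page case, with ToyBlock standing in
(hypothetically) for block_desc:

    #include <assert.h>
    #include <stddef.h>
    #include <sys/types.h> /* ssize_t */

    typedef struct { int free_pages; } ToyBlock;

    /* Mirrors block_cache_free_page's small-page case: the page stays
       mapped inside its block, so the return value is pure bookkeeping. */
    static ssize_t toy_block_free_page(ToyBlock *b, size_t len, int originated_here)
    {
      b->free_pages++;
      return originated_here ? 0 : (ssize_t)len;
    }

    int main(void)
    {
      ToyBlock b = {0};
      assert(toy_block_free_page(&b, 4096, 1) == 0);    /* local page: no change */
      assert(toy_block_free_page(&b, 4096, 0) == 4096); /* adopted page: now counted */
      return 0;
    }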
diff --git a/src/racket/gc2/newgc.c b/src/racket/gc2/newgc.c
index 26b145160c..e285f2f0ef 100644
--- a/src/racket/gc2/newgc.c
+++ b/src/racket/gc2/newgc.c
@@ -378,17 +378,18 @@ static void *malloc_pages(NewGC *gc, size_t len, size_t alignment, int dirty, in
 static void free_pages(NewGC *gc, void *p, size_t len, int type, int expect_mprotect, void **src_block)
 {
   gc->used_pages -= size_to_apage_count(len);
-  mmu_free_page(gc->mmu, p, len, type, expect_mprotect, src_block);
+  mmu_free_page(gc->mmu, p, len, type, expect_mprotect, src_block, 1);
 }
 
 static void free_orphaned_page(NewGC *gc, mpage *tmp) {
-  /* free_pages decrements gc->used_pages which is incorrect, since this is an orphaned page
+  /* free_pages decrements gc->used_pages which is incorrect, since this is an orphaned page,
    * so we use mmu_free_page directly */
   mmu_free_page(gc->mmu, tmp->addr, round_to_apage_size(tmp->size),
-      page_mmu_type(tmp),
-      page_mmu_protectable(tmp),
-      &tmp->mmu_src_block);
+                page_mmu_type(tmp),
+                page_mmu_protectable(tmp),
+                &tmp->mmu_src_block,
+                0); /* don't adjust count, since we're failing to adopt it */
   free_mpage(tmp);
 }
 
@@ -899,16 +900,14 @@ static void *allocate_big(const size_t request_size_bytes, int type)
 
   gc->gen0.big_pages = bpage;
 
-  /* orphan this page from the current GC */
-  /* this page is going to be sent to a different place, don't account for it here */
-  /* message memory pages shouldn't go into the page_map, they are getting sent to another place */
   if (gc->saved_allocator) {
+    /* MESSAGE ALLOCATION: orphan this page from the current GC; this
+       page is going to be sent to a different place, so don't account
+       for it here, and don't put it in the page_map */
     orphan_page_accounting(gc, allocate_size);
-  }
-  else {
+  } else
     pagemap_add(gc->page_maps, bpage);
-  }
-
+
   {
     void * objptr = BIG_PAGE_TO_OBJECT(bpage);
     ASSERT_VALID_OBJPTR(objptr);
@@ -942,7 +941,11 @@ inline static mpage *create_new_medium_page(NewGC *gc, const int sz, const int p
   gc->med_pages[pos] = page;
   gc->med_freelist_pages[pos] = page;
 
-  pagemap_add(gc->page_maps, page);
+  if (gc->saved_allocator) /* see MESSAGE ALLOCATION above */
+    orphan_page_accounting(gc, APAGE_SIZE);
+  else
+    pagemap_add(gc->page_maps, page);
+
   return page;
 }
 
@@ -1049,15 +1052,10 @@ inline static mpage *gen0_create_new_nursery_mpage(NewGC *gc, const size_t page_
   page->size = PREFIX_SIZE;
   GEN0_ALLOC_SIZE(page) = page_size;
 
-  /* orphan this page from the current GC */
-  /* this page is going to be sent to a different place, don't account for it here */
-  /* message memory pages shouldn't go into the page_map, they are getting sent to another place */
-  if (gc->saved_allocator) {
+  if (gc->saved_allocator) /* see MESSAGE ALLOCATION above */
     orphan_page_accounting(gc, page_size);
-  }
-  else {
+  else
     pagemap_add_with_size(gc->page_maps, page, page_size);
-  }
 
   GCVERBOSEPAGE(gc, "NEW gen0", page);
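The newgc.c side pairs the flag with orphan accounting. A page allocated
while gc->saved_allocator is set is a place-message page, so the sending GC
leaves it out of its own accounting (orphan_page_accounting) and out of its
page_map; free_pages always passes originated_here = 1 because it only sees
pages this GC owns, while free_orphaned_page passes 0 so a GC that fails to
adopt a message page does not decrement a counter that never included the
page. A counters-only sketch of the allocation-side branch, where ToyGC and
its fields are hypothetical stand-ins:

    #include <assert.h>
    #include <stddef.h>

    typedef struct {
      size_t used_pages;      /* pages this GC accounts for */
      size_t pagemap_entries; /* pages registered in its page_map */
      int saved_allocator;    /* stands in for gc->saved_allocator */
    } ToyGC;

    static void toy_new_page(ToyGC *gc)
    {
      if (gc->saved_allocator) {
        /* MESSAGE ALLOCATION: orphaned, destined for another place */
      } else {
        gc->used_pages++;      /* counted here */
        gc->pagemap_entries++; /* pagemap_add */
      }
    }

    int main(void)
    {
      ToyGC gc = {0, 0, 0};
      toy_new_page(&gc);      /* ordinary page: counted and mapped */
      gc.saved_allocator = 1;
      toy_new_page(&gc);      /* message page: neither */
      assert(gc.used_pages == 1 && gc.pagemap_entries == 1);
      return 0;
    }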
diff --git a/src/racket/gc2/vm.c b/src/racket/gc2/vm.c
index 648948bb54..43fe58575c 100644
--- a/src/racket/gc2/vm.c
+++ b/src/racket/gc2/vm.c
@@ -135,22 +135,26 @@ static void *mmu_alloc_page(MMU* mmu, size_t len, size_t alignment, int dirty, i
     return alloc_cache_alloc_page(alloc_cache, len, alignment, dirty, &mmu->memory_allocated);
   }
 #else
+  mmu->memory_allocated += len;
   return os_alloc_pages(mmu, len, alignment, dirty);
 #endif
 }
 
-static void mmu_free_page(MMU* mmu, void *p, size_t len, int type, int expect_mprotect, void **src_block) {
+static void mmu_free_page(MMU* mmu, void *p, size_t len, int type, int expect_mprotect, void **src_block,
+                          int originated_here) {
   mmu_assert_os_page_aligned(mmu, (size_t)p);
   mmu_assert_os_page_aligned(mmu, len);
 #ifdef USE_BLOCK_CACHE
-  mmu->memory_allocated += block_cache_free_page(mmu->block_cache, p, len, type, expect_mprotect, src_block);
+  mmu->memory_allocated += block_cache_free_page(mmu->block_cache, p, len, type, expect_mprotect, src_block,
+                                                 originated_here);
 #elif !( defined(_WIN32) || defined(OSKIT) )
   //len = mmu_round_up_to_os_page_size(mmu, len);
   {
     AllocCacheBlock *alloc_cache = mmu->alloc_caches[!!expect_mprotect];
-    mmu->memory_allocated += alloc_cache_free_page(alloc_cache, p, len, MMU_DIRTY);
+    mmu->memory_allocated += alloc_cache_free_page(alloc_cache, p, len, MMU_DIRTY, originated_here);
   }
 #else
+  if (originated_here) mmu->memory_allocated -= len;
   os_free_pages(mmu, p, len);
 #endif
 }
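The vm.c changes keep the same apparent invariant across all three build
configurations: memory_allocated tracks the pages this MMU itself accounts
for. With a block cache or an alloc cache the delta comes back from the cache
layer; in the bare #else build, mmu_alloc_page now counts pages directly at
allocation and mmu_free_page uncounts them only when they originated here. A
small sketch of that direct-accounting pair, with ToyMMU and both wrappers as
hypothetical stand-ins:

    #include <assert.h>
    #include <stddef.h>

    typedef struct { size_t memory_allocated; } ToyMMU;

    static void toy_alloc_page(ToyMMU *mmu, size_t len)
    {
      mmu->memory_allocated += len;   /* counted at allocation */
    }

    static void toy_free_page(ToyMMU *mmu, size_t len, int originated_here)
    {
      if (originated_here)
        mmu->memory_allocated -= len; /* uncount only pages counted here */
    }

    int main(void)
    {
      ToyMMU mmu = {0};
      toy_alloc_page(&mmu, 4096);
      toy_free_page(&mmu, 4096, 1); /* own page: counter returns to 0 */
      toy_free_page(&mmu, 4096, 0); /* adopted page: no underflow */
      assert(mmu.memory_allocated == 0);
      return 0;
    }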