From dc2c16058bed219dc3d0ad1a5dcdee7f8f8c3349 Mon Sep 17 00:00:00 2001
From: Kevin Tew
Date: Fri, 22 Apr 2011 12:17:37 -0600
Subject: [PATCH] [Places] free alloc_cache and page_range on place termination

---
 src/racket/gc2/alloc_cache.c | 27 +++++++++++++++++++++++++++
 src/racket/gc2/block_cache.c |  9 +++++++--
 src/racket/gc2/page_range.c  |  8 ++++++++
 src/racket/gc2/vm.c          |  9 ++++++---
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/src/racket/gc2/alloc_cache.c b/src/racket/gc2/alloc_cache.c
index 37172ce0f3..44895afa90 100644
--- a/src/racket/gc2/alloc_cache.c
+++ b/src/racket/gc2/alloc_cache.c
@@ -22,6 +22,16 @@ static AllocCacheBlock *alloc_cache_create() {
   return ofm_malloc_zero(sizeof(AllocCacheBlock) * BLOCKFREE_CACHE_SIZE);
 }
 
+static ssize_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree);
+static ssize_t alloc_cache_free(AllocCacheBlock *ac) {
+  if (ac) {
+    ssize_t s = alloc_cache_free_all_pages(ac);
+    free(ac);
+    return s;
+  }
+  return 0;
+}
+
 static int alloc_cache_block_compare(const void *a, const void *b)
 {
   if ((uintptr_t)((AllocCacheBlock *)a)->start < (uintptr_t)((AllocCacheBlock *)b)->start)
@@ -162,6 +172,23 @@ static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
   return freed;
 }
 
+static ssize_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree)
+{
+  int i;
+  ssize_t freed = 0;
+  alloc_cache_collapse_pages(blockfree);
+
+  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
+    if (blockfree[i].start) {
+      os_free_pages(blockfree[i].start, blockfree[i].len);
+      freed -= blockfree[i].len;
+      blockfree[i].start = NULL;
+      blockfree[i].len = 0;
+    }
+  }
+  return freed;
+}
+
 /* Instead of immediately freeing pages with munmap---only to mmap
    them again---we cache BLOCKFREE_CACHE_SIZE freed pages. A page is
    cached unused for at most BLOCKFREE_UNMAP_AGE cycles of the
diff --git a/src/racket/gc2/block_cache.c b/src/racket/gc2/block_cache.c
index dd522aa658..9f83ac6b0b 100644
--- a/src/racket/gc2/block_cache.c
+++ b/src/racket/gc2/block_cache.c
@@ -11,11 +11,13 @@ static void os_protect_pages(void *p, size_t len, int writable);
 
 struct block_desc;
 static AllocCacheBlock *alloc_cache_create();
+static ssize_t alloc_cache_free(AllocCacheBlock *);
 static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty);
 static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree);
 static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
 
 static Page_Range *page_range_create();
+static void page_range_free(Page_Range *pr);
 static void page_range_flush(Page_Range *pr, int writeable);
 static void page_range_add(Page_Range *pr, void *_start, uintptr_t len, int writeable);
 
@@ -62,15 +64,18 @@ static BlockCache* block_cache_create(MMU *mmu) {
   bc->atomic.atomic = 1;
   gclist_init(&bc->non_atomic.full);
   gclist_init(&bc->non_atomic.free);
-  bc->atomic.atomic = 0;
+  bc->non_atomic.atomic = 0;
   bc->bigBlockCache = alloc_cache_create();
   bc->page_range = page_range_create();
   bc->mmu = mmu;
   return bc;
 }
 
-static void block_cache_free(BlockCache* bc) {
+static ssize_t block_cache_free(BlockCache* bc) {
+  ssize_t acf = alloc_cache_free(bc->bigBlockCache);
+  page_range_free(bc->page_range);
   free(bc);
+  return acf;
 }
 
 static block_desc *bc_alloc_std_block(block_group *bg) {
diff --git a/src/racket/gc2/page_range.c b/src/racket/gc2/page_range.c
index b5a507b5c4..218fb7c2a4 100644
--- a/src/racket/gc2/page_range.c
+++ b/src/racket/gc2/page_range.c
@@ -35,6 +35,14 @@ static Page_Range *page_range_create()
   return pr;
 }
 
+static void page_range_free(Page_Range *pr)
+{
+  if (pr) {
+    free(pr->range_alloc_block);
+    free(pr);
+  }
+}
+
 static void page_range_add(Page_Range *pr, void *_start, uintptr_t len, int writeable)
 {
   GC_MP_CNT_INC(mp_pr_add_cnt);
diff --git a/src/racket/gc2/vm.c b/src/racket/gc2/vm.c
index ac2fee02be..648948bb54 100644
--- a/src/racket/gc2/vm.c
+++ b/src/racket/gc2/vm.c
@@ -112,12 +112,15 @@ static MMU *mmu_create(NewGC *gc) {
 }
 
 static void mmu_free(MMU *mmu) {
+  /* printf("MMU ALLOCATED PRE %li\n", mmu->memory_allocated); */
 #ifdef USE_BLOCK_CACHE
-  block_cache_free(mmu->block_cache);
+  mmu->memory_allocated += block_cache_free(mmu->block_cache);
 #elif !( defined(_WIN32) || defined(OSKIT) )
-  free(mmu->alloc_caches[0]);
-  free(mmu->alloc_caches[1]);
+  page_range_free(mmu->page_range);
+  mmu->memory_allocated += alloc_cache_free(mmu->alloc_caches[0]);
+  mmu->memory_allocated += alloc_cache_free(mmu->alloc_caches[1]);
 #endif
+  /* printf("MMU ALLOCATED POST %li\n", mmu->memory_allocated); */
   free(mmu);
 }
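
Note: the change to block_cache_free (void -> ssize_t) follows the accounting
convention already used by alloc_cache_flush_freed_pages: a free routine
reports released pages as a negative byte delta that the caller adds to its
memory_allocated counter, as mmu_free now does. A minimal standalone sketch
of that convention; the names Cache and cache_free are hypothetical, not
code from this patch:

    /* Sketch: a free routine returns the released bytes as a negative
       delta, which the caller folds into its running allocation count. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>   /* ssize_t */

    typedef struct { char *pages; size_t len; } Cache;

    static ssize_t cache_free(Cache *c) {
      if (c) {
        ssize_t delta = -(ssize_t)c->len;  /* bytes returned to the OS */
        free(c->pages);
        free(c);
        return delta;
      }
      return 0;                            /* nothing to free */
    }

    int main(void) {
      ssize_t memory_allocated = 0;
      Cache *c = malloc(sizeof(Cache));
      c->pages = malloc(4096);
      c->len = 4096;
      memory_allocated += (ssize_t)c->len;  /* allocation recorded */
      memory_allocated += cache_free(c);    /* negative delta cancels it */
      printf("memory_allocated after free: %zd\n", memory_allocated);  /* 0 */
      return 0;
    }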