[Places] free alloc_cache and page_range on place termination

Kevin Tew 2011-04-22 12:17:37 -06:00
parent 012868e491
commit dc2c16058b
4 changed files with 48 additions and 5 deletions

View File

@@ -22,6 +22,16 @@ static AllocCacheBlock *alloc_cache_create() {
   return ofm_malloc_zero(sizeof(AllocCacheBlock) * BLOCKFREE_CACHE_SIZE);
 }
 
+static ssize_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree);
+static ssize_t alloc_cache_free(AllocCacheBlock *ac) {
+  if (ac) {
+    ssize_t s = alloc_cache_free_all_pages(ac);
+    free(ac);
+    return s;
+  }
+  return 0;
+}
+
 static int alloc_cache_block_compare(const void *a, const void *b)
 {
   if ((uintptr_t)((AllocCacheBlock *)a)->start < (uintptr_t)((AllocCacheBlock *)b)->start)
@@ -162,6 +172,23 @@ static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
   return freed;
 }
 
+static ssize_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree)
+{
+  int i;
+  ssize_t freed = 0;
+  alloc_cache_collapse_pages(blockfree);
+
+  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
+    if (blockfree[i].start) {
+      os_free_pages(blockfree[i].start, blockfree[i].len);
+      freed -= blockfree[i].len;
+      blockfree[i].start = NULL;
+      blockfree[i].len = 0;
+    }
+  }
+  return freed;
+}
+
 /* Instead of immediately freeing pages with munmap---only to mmap
    them again---we cache BLOCKFREE_CACHE_SIZE freed pages. A page is
    cached unused for at most BLOCKFREE_UNMAP_AGE cycles of the

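For readers coming to this diff cold, the strategy described in that trailing comment can be reduced to a short sketch. Everything below (CachedRun, CACHE_SIZE, UNMAP_AGE, the helper names) is an illustrative stand-in for the real AllocCacheBlock machinery, not the Racket implementation:

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

#define CACHE_SIZE 64  /* stand-in for BLOCKFREE_CACHE_SIZE */
#define UNMAP_AGE  3   /* stand-in for BLOCKFREE_UNMAP_AGE */

typedef struct { void *start; size_t len; int age; } CachedRun;

/* Park a freed run in the cache instead of unmapping it immediately,
   so a later allocation can reuse it without a fresh mmap. */
static void cache_free_pages(CachedRun *cache, void *p, size_t len) {
  int i;
  for (i = 0; i < CACHE_SIZE; i++) {
    if (!cache[i].start) {
      cache[i].start = p;
      cache[i].len = len;
      cache[i].age = 0;
      return;
    }
  }
  munmap(p, len);  /* cache full: really give the pages back */
}

/* Run once per GC cycle: unmap any run that has sat unused too long. */
static void cache_flush_aged(CachedRun *cache) {
  int i;
  for (i = 0; i < CACHE_SIZE; i++) {
    if (cache[i].start && ++cache[i].age >= UNMAP_AGE) {
      munmap(cache[i].start, cache[i].len);
      memset(&cache[i], 0, sizeof cache[i]);
    }
  }
}

What this commit adds on top of that picture is the final teardown path: when a place dies, every cached run is unmapped at once (alloc_cache_free_all_pages) and the cache array itself is freed (alloc_cache_free).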
View File

@@ -11,11 +11,13 @@ static void os_protect_pages(void *p, size_t len, int writable);
 struct block_desc;
 static AllocCacheBlock *alloc_cache_create();
+static ssize_t alloc_cache_free(AllocCacheBlock *);
 static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty);
 static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree);
 static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
 
 static Page_Range *page_range_create();
+static void page_range_free(Page_Range *pr);
 static void page_range_flush(Page_Range *pr, int writeable);
 static void page_range_add(Page_Range *pr, void *_start, uintptr_t len, int writeable);
@@ -62,15 +64,18 @@ static BlockCache* block_cache_create(MMU *mmu) {
   bc->atomic.atomic = 1;
   gclist_init(&bc->non_atomic.full);
   gclist_init(&bc->non_atomic.free);
-  bc->atomic.atomic = 0;
+  bc->non_atomic.atomic = 0;
   bc->bigBlockCache = alloc_cache_create();
   bc->page_range = page_range_create();
   bc->mmu = mmu;
   return bc;
 }
 
-static void block_cache_free(BlockCache* bc) {
+static ssize_t block_cache_free(BlockCache* bc) {
+  ssize_t acf = alloc_cache_free(bc->bigBlockCache);
+  page_range_free(bc->page_range);
   free(bc);
+  return acf;
 }
 
 static block_desc *bc_alloc_std_block(block_group *bg) {

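Two things happen in this file: block_cache_free picks up the new teardown calls, and its return type changes from void to ssize_t so the number of bytes handed back to the OS can be propagated to the caller. (The bc->atomic.atomic = 0 line it replaces looks like a small pre-existing slip: it re-cleared the atomic group instead of initializing the non-atomic one.) A minimal, self-contained sketch of the return-a-delta convention, with invented names (Cache, cache_destroy):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

typedef struct { char *pages; ssize_t bytes; } Cache;  /* hypothetical */

/* Tear down the cache; the negative return value is the change in
   allocated bytes, ready to be folded into a running total. */
static ssize_t cache_destroy(Cache *c) {
  ssize_t delta = -c->bytes;
  free(c->pages);
  free(c);
  return delta;
}

int main(void) {
  ssize_t memory_allocated = 0;
  Cache *c = malloc(sizeof *c);
  c->bytes = 4096;
  c->pages = malloc((size_t)c->bytes);
  memory_allocated += c->bytes;

  memory_allocated += cache_destroy(c);  /* 4096 + (-4096) */
  printf("%zd\n", memory_allocated);     /* prints 0: the books balance */
  return 0;
}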
View File

@@ -35,6 +35,14 @@ static Page_Range *page_range_create()
   return pr;
 }
 
+static void page_range_free(Page_Range *pr)
+{
+  if (pr) {
+    free(pr->range_alloc_block);
+    free(pr);
+  }
+}
+
 static void page_range_add(Page_Range *pr, void *_start, uintptr_t len, int writeable)
 {
   GC_MP_CNT_INC(mp_pr_add_cnt);

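The new page_range_free is the mirror image of page_range_create, whose tail is visible above the hunk: it releases the range_alloc_block buffer (presumably allocated in page_range_create) before the Page_Range struct itself, and the NULL check lets callers invoke it unconditionally during shutdown.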
View File

@@ -112,12 +112,15 @@ static MMU *mmu_create(NewGC *gc) {
 }
 
 static void mmu_free(MMU *mmu) {
   /* printf("MMU ALLOCATED PRE %li\n", mmu->memory_allocated); */
 #ifdef USE_BLOCK_CACHE
-  block_cache_free(mmu->block_cache);
+  mmu->memory_allocated += block_cache_free(mmu->block_cache);
 #elif !( defined(_WIN32) || defined(OSKIT) )
-  free(mmu->alloc_caches[0]);
-  free(mmu->alloc_caches[1]);
+  page_range_free(mmu->page_range);
+  mmu->memory_allocated += alloc_cache_free(mmu->alloc_caches[0]);
+  mmu->memory_allocated += alloc_cache_free(mmu->alloc_caches[1]);
 #endif
   /* printf("MMU ALLOCATED POST %li\n", mmu->memory_allocated); */
   free(mmu);
 }
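With the deltas threaded through, the commented-out PRE/POST printfs bracket the whole teardown: once block_cache_free (or, on the non-block-cache path, the two alloc_cache_free calls plus page_range_free) has reported back, mmu->memory_allocated should drop back toward zero, which is what makes any leak at place termination visible in the accounting.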