GC: give main allocation pages back to the OS less eagerly

Matthew Flatt 2016-05-15 14:20:04 -06:00
parent 7fbe6a4097
commit b243ce894a
5 changed files with 33 additions and 17 deletions
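
The mechanism is the same across all five files: a freed page (or fully free block) parks in a cache with age 0; each non-forcing flush ages the entries and unmaps only those that have already waited FREE_UNMAP_AGE flushes, while a forcing flush unmaps everything at once. Below is a minimal standalone sketch of that scheme; the names (cache_entry, flush_freed_pages) and the positive byte count it returns are illustrative, not the Racket identifiers.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative slot in a free-page cache; these names are
   assumptions for the sketch, not the Racket structs. */
typedef struct cache_entry {
  void *start;       /* NULL when the slot is empty */
  size_t len;
  int age;           /* non-forcing flushes survived so far */
} cache_entry;

#define CACHE_SIZE 96
#define FREE_UNMAP_AGE 1   /* non-forcing flushes to ignore */

/* Stand-in for handing pages back to the OS (really munmap or similar). */
static void os_free_pages(void *p, size_t len) { (void)len; free(p); }

/* Non-forcing flush: unmap only entries that have already aged;
   forcing flush: unmap everything now. Returns bytes released. */
static size_t flush_freed_pages(cache_entry *cache, int force)
{
  size_t freed = 0;
  int i;
  for (i = 0; i < CACHE_SIZE; i++) {
    if (cache[i].start) {
      if (force || (cache[i].age >= FREE_UNMAP_AGE)) {
        os_free_pages(cache[i].start, cache[i].len);
        freed += cache[i].len;
        cache[i].start = NULL;
      } else
        cache[i].age++;    /* survive this flush; eligible next time */
    }
  }
  return freed;
}

int main(void)
{
  cache_entry cache[CACHE_SIZE] = {{0}};
  cache[0].start = malloc(4096);   /* pretend a page was just freed */
  cache[0].len = 4096;
  /* With FREE_UNMAP_AGE = 1, the first non-forcing flush is ignored
     and the second one releases the page: */
  printf("flush 1: %zu bytes\n", flush_freed_pages(cache, 0));  /* 0 */
  printf("flush 2: %zu bytes\n", flush_freed_pages(cache, 0));  /* 4096 */
  return 0;
}

The real code reports the change as a negative size delta and resets an entry's age when it is reused for a new allocation; the timing is the point: a page must sit unused across a whole flush cycle before it goes back to the OS.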

racket/src/racket/gc2/alloc_cache.c

@@ -12,7 +12,7 @@
  */
 /* Controls how often freed pages are actually returned to OS: */
-#define BLOCKFREE_UNMAP_AGE 3
+#define BLOCKFREE_UNMAP_AGE FREE_UNMAP_AGE
 /* Controls size of the cache */
 #define BLOCKFREE_CACHE_SIZE 96
@@ -171,7 +171,7 @@ static intptr_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_
   return (originated_here ? -len : 0);
 }
-static intptr_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
+static intptr_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree, int force)
 {
   int i;
   intptr_t freed = 0;
@@ -179,7 +179,7 @@ static intptr_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
   for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
     if (blockfree[i].start) {
-      if (blockfree[i].age == BLOCKFREE_UNMAP_AGE) {
+      if (force || (blockfree[i].age == BLOCKFREE_UNMAP_AGE)) {
         os_free_pages(blockfree[i].start, blockfree[i].len);
         freed -= blockfree[i].len;
         blockfree[i].start = NULL;

racket/src/racket/gc2/block_cache.c

@@ -10,12 +10,14 @@ static void os_protect_pages(void *p, size_t len, int writable);
 #define BC_STARTING_BLOCK_SIZE (1 << 21) /* 2 MB */
 #define BC_MAX_BLOCK_SIZE (1 << 24) /* 16 MB */
+#define STDBLOCKFREE_UNMAP_AGE FREE_UNMAP_AGE
 struct block_desc;
 struct AllocCacheBlock;
 static struct AllocCacheBlock *alloc_cache_create();
 static ssize_t alloc_cache_free(struct AllocCacheBlock *);
 static ssize_t alloc_cache_free_page(struct AllocCacheBlock *blockfree, char *p, size_t len, int dirty, int originated_here);
-static ssize_t alloc_cache_flush_freed_pages(struct AllocCacheBlock *blockfree);
+static ssize_t alloc_cache_flush_freed_pages(struct AllocCacheBlock *blockfree, int force);
 static void *alloc_cache_alloc_page(struct AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
 static Page_Range *page_range_create();
@@ -40,6 +42,7 @@ typedef struct block_desc {
   intptr_t used;
   intptr_t totalcnt;
   intptr_t freecnt;
+  int free_age;
   struct block_group *group;
   char in_queue, want_compact;
 } block_desc;
@@ -197,6 +200,7 @@ static void *bc_alloc_std_page(BlockCache *bc, int dirty_ok, int expect_mprotect
     bd->free = fl->next;
     bd->freecnt--;
+    bd->free_age = 0;
     *src_block = bd;
@@ -257,8 +261,14 @@ static void *bc_alloc_std_page(BlockCache *bc, int dirty_ok, int expect_mprotect
   }
 }
-static ssize_t bc_free_std_block(block_desc *b, int expect_mprotect) {
+static ssize_t bc_free_std_block(block_desc *b, int expect_mprotect, int force) {
   ssize_t size_diff = 0;
+  if (!force && (b->free_age < STDBLOCKFREE_UNMAP_AGE)) {
+    b->free_age++;
+    return 0;
+  }
   /* printf("BLOCK FREE %d %ld\n", expect_mprotect, b->size); */
   gclist_del(&b->gclist);
   os_free_pages(b->block, b->size);
@@ -401,19 +411,19 @@ static int block_cache_compact(void **src_block) {
   return b->want_compact;
 }
-static ssize_t block_cache_flush_freed_pages(BlockCache* bc) {
+static ssize_t block_cache_flush_freed_pages(BlockCache* bc, int force) {
   block_desc *b;
   block_desc *bn;
   ssize_t size_diff = 0;
   ssize_t alloc_cache_size_diff = 0;
   gclist_each_item_safe(b, bn, &bc->atomic.free, block_desc, gclist) {
-    if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b, 0); }
+    if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b, 0, force); }
   }
   gclist_each_item_safe(b, bn, &bc->non_atomic.free, block_desc, gclist) {
-    if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b, 1); }
+    if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b, 1, force); }
   }
-  alloc_cache_size_diff = alloc_cache_flush_freed_pages(bc->bigBlockCache);
+  alloc_cache_size_diff = alloc_cache_flush_freed_pages(bc->bigBlockCache, force);
   return size_diff + alloc_cache_size_diff;
 }
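
block_cache.c applies the same grace period one level up: a block whose pages are all free is not returned to the OS until it has sat through STDBLOCKFREE_UNMAP_AGE non-forcing flushes, and handing out a page from a block resets its clock (bd->free_age = 0). A sketch of just that gate, where the block type below is an assumption rather than the real block_desc:

/* Sketch of the gate bc_free_std_block now applies; `block` is an
   illustrative stand-in for Racket's block_desc. */
typedef struct block {
  int free_age;   /* non-forcing flushes survived while fully free */
  /* ... pages, free list, counts ... */
} block;

#define STDBLOCKFREE_UNMAP_AGE 1

/* Nonzero means: really unmap the block now. */
static int should_unmap_block(block *b, int force)
{
  if (!force && (b->free_age < STDBLOCKFREE_UNMAP_AGE)) {
    b->free_age++;        /* not old enough; try again next flush */
    return 0;
  }
  return 1;
}

/* Allocating out of a block makes it "hot" again. */
static void block_page_allocated(block *b) { b->free_age = 0; }

The reset on allocation matters: a block that oscillates between fully free and in use never ages out, so only blocks that stay completely idle across consecutive flushes are unmapped.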

racket/src/racket/gc2/newgc.c

@@ -368,7 +368,7 @@ static void check_excessive_free_pages(NewGC *gc) {
      We choose 4 instead of 2 for "excessive" because a block cache (when
      available) has a fill factor of 2, and flushing will not reduce that. */
   if (mmu_memory_allocated(gc->mmu) > ((gc->used_pages << (LOG_APAGE_SIZE + 2)))) {
-    mmu_flush_freed_pages(gc->mmu);
+    mmu_flush_freed_pages(gc->mmu, 1);
   }
 }
@@ -5602,11 +5602,12 @@ static void garbage_collect(NewGC *gc, int force_full, int no_full,
   TIME_STEP("protect");
   if (gc->gc_full) {
-    mmu_flush_freed_pages(gc->mmu);
+    mmu_flush_freed_pages(gc->mmu, 0);
     gc->high_fragmentation = (mmu_memory_allocated_and_used(gc->mmu)
                               > (HIGH_FRAGMENTATION_RATIO
                                  * (gc->memory_in_use + gen_half_size_in_use(gc) + GEN0_MAX_SIZE)));
   }
+  TIME_STEP("flush");
   reset_finalizer_tree(gc);
   if (gc->gc_full || !gc->started_incremental)
@@ -5833,7 +5834,7 @@ static void free_gc(NewGC *gc)
   free_all_stack_pages(gc);
   free_incremental_admin_pages(gc);
-  mmu_flush_freed_pages(gc->mmu);
+  mmu_flush_freed_pages(gc->mmu, 1);
   mmu_free(gc->mmu);
   ofm_free(gc->mark_table, gc->number_of_tags * sizeof(Mark2_Proc));
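
newgc.c then decides, per call site, whether a flush may be lazy: the routine flush after a full collection is now non-forcing, while the memory-pressure check and GC teardown force everything out. A schematic of the three sites in the diff, with stand-in types and a stubbed flush so the policy reads on its own:

#include <stdint.h>

/* Stand-ins; the real functions live in newgc.c and vm.c. */
typedef struct { intptr_t allocated_bytes; intptr_t used_pages; } gc_state;

#define LOG_APAGE_SIZE 14   /* page size assumed for the sketch */

static void flush_freed_pages(gc_state *gc, int force)
{
  /* would unmap aged cached pages, or all of them when force is set */
  (void)gc; (void)force;
}

/* End of a full collection: non-forcing, so freshly freed pages stay
   cached for quick reuse by the next cycle. */
static void after_full_gc(gc_state *gc) { flush_freed_pages(gc, 0); }

/* Memory pressure: more than 4x the in-use pages are mapped (the
   factor of 4 leaves room for the block cache's fill factor of 2),
   so everything goes back to the OS immediately. */
static void check_excessive_free_pages(gc_state *gc)
{
  if (gc->allocated_bytes > (gc->used_pages << (LOG_APAGE_SIZE + 2)))
    flush_freed_pages(gc, 1);
}

/* Tearing the GC down: nothing will be reused, so force the flush. */
static void on_free_gc(gc_state *gc) { flush_freed_pages(gc, 1); }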

racket/src/racket/gc2/vm.c

@@ -34,6 +34,10 @@ enum {
 #endif
 #define QUEUED_MPROTECT_INFECTS_MED 0
+/* How many non-forcing calls to flush will be ignored by a cached,
+   free object? */
+# define FREE_UNMAP_AGE 1
 /* Either USE_ALLOC_CACHE or OS_ALLOCATOR_NEEDS_ALIGNMENT must be
    enabled, unless the lower-level allocator's alignment matches
    APAGE_SIZE. */
@@ -182,12 +186,12 @@ static void mmu_free_page(MMU* mmu, void *p, size_t len, int type, int expect_mp
 #endif
 }
-static void mmu_flush_freed_pages(MMU *mmu) {
+static void mmu_flush_freed_pages(MMU *mmu, int force) {
 #ifdef USE_BLOCK_CACHE
-  mmu->memory_allocated += block_cache_flush_freed_pages(mmu->block_cache);
+  mmu->memory_allocated += block_cache_flush_freed_pages(mmu->block_cache, force);
 #elif defined(USE_ALLOC_CACHE)
-  mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[0]);
-  mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[1]);
+  mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[0], force);
+  mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[1], force);
 #endif
 }

racket/src/racket/sgc/sgc.c

@@ -1014,6 +1014,7 @@ static void *ofm_malloc_zero(size_t len)
 # define APAGE_SIZE SECTOR_SEGMENT_SIZE
 # define NO_ALLOC_CACHE_FREE
+# define FREE_UNMAP_AGE 1
 # include "../gc2/my_qsort.c"
 # include "../gc2/alloc_cache.c"
 static AllocCacheBlock *alloc_cache;
@@ -1045,7 +1046,7 @@ static void free_plain_sector(void *p, int count, int executable)
 static void flush_freed_sectors()
 {
   if (alloc_cache)
-    alloc_cache_flush_freed_pages(alloc_cache);
+    alloc_cache_flush_freed_pages(alloc_cache, 0);
 }
 #endif
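
Finally, sgc picks up the same cache by textual inclusion: it defines the configuration macros (FREE_UNMAP_AGE among them) and then #includes ../gc2/alloc_cache.c, so the shared implementation compiles against the includer's settings, and since sgc only ever calls the flush non-forcing, its cached sectors always get the full grace period. A generic sketch of that configure-then-include pattern, with illustrative names; note that alloc_cache.c itself expects FREE_UNMAP_AGE to be supplied, so the #ifndef default below is just for the sketch:

/* --- cache_impl.inc: the shared implementation (illustrative) --- */
#ifndef UNMAP_AGE
# define UNMAP_AGE 3        /* default if the includer sets nothing */
#endif

static int flushes_before_unmap(void) { return UNMAP_AGE; }

/* --- an includer, as sgc.c does with FREE_UNMAP_AGE ---
 *
 *   #define UNMAP_AGE 1
 *   #include "cache_impl.inc"
 *
 * flushes_before_unmap() then reports the includer's choice. */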