Removed GC global and ACTUALLY_USED macros from allocators

svn: r12305
Kevin Tew 2008-11-05 21:10:13 +00:00
parent 90f1c78c53
commit 1d5d7ea18f
10 changed files with 143 additions and 107 deletions

View File

@@ -1,22 +1,19 @@
/*
Provides:
find_cached_pages --- same interface as malloc_pages
vm_malloc_pages --- usual interface
vm_free_pages --- usual interface
vm_flush_freed_pages --- usual interface
Requires (defined earlier):
page_size --- in bytes
my_qsort --- possibly from my_qsort.c
ACTUALLY_ALLOCATING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
*/
/* interface to GC */
/*
static void *malloc_pages(size_t len, size_t alignment);
static void *malloc_dirty_pages(size_t len, size_t alignment);
static void free_pages(void *p, size_t len);
static void flush_freed_pages(void);
static void protect_pages(void *p, size_t len, int writable);
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok);
static void vm_free_pages(void *p, size_t len);
static void vm_flush_freed_pages(void);
static void vm_protect_pages(void *p, size_t len, int writable);
*/
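The old entry points (top) reach for a global GC to find their state; the new ones (bottom) take the owning VM explicitly. A minimal standalone sketch of that refactoring pattern, with hypothetical names (Ctx, ctx_malloc_pages) standing in for this commit's VM and allocators:

#include <stdlib.h>

/* Hypothetical context struct standing in for the commit's VM. */
typedef struct Ctx {
  size_t memory_allocated;   /* replaces the global accounting macros */
} Ctx;

static void *ctx_malloc_pages(Ctx *ctx, size_t len) {
  ctx->memory_allocated += len;   /* was ACTUALLY_ALLOCATING_PAGES(len) */
  return malloc(len);
}

static void ctx_free_pages(Ctx *ctx, void *p, size_t len) {
  ctx->memory_allocated -= len;   /* was ACTUALLY_FREEING_PAGES(len) */
  free(p);
}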
/* interface to OS */
@@ -26,21 +23,23 @@ static void *os_vm_alloc_pages(size_t len);
*/
#define BLOCKFREE_UNMAP_AGE 1
static int compare_free_block(const void *a, const void *b)
static int free_block_compare(const void *a, const void *b)
{
if ((unsigned long)((Free_Block *)a)->start < (unsigned long)((Free_Block *)b)->start)
if ((unsigned long)((FreeBlock *)a)->start < (unsigned long)((FreeBlock *)b)->start)
return -1;
else
return 1;
}
static void collapse_adjacent_pages(void)
static void alloc_cache_collapse_pages(FreeBlock *blockfree)
{
int i, j;
Free_Block *blockfree = GC->blockfree;
int i;
int j;
/* sort by FreeBlock->start */
my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(FreeBlock), free_block_compare);
/* collapse adjacent: */
my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(Free_Block), compare_free_block);
j = 0;
for (i = 1; i < BLOCKFREE_CACHE_SIZE; i++) {
if ((blockfree[j].start + blockfree[j].len) == blockfree[i].start) {
@@ -54,11 +53,10 @@ static void collapse_adjacent_pages(void)
}
}
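The collapse pass sorts the cache by block start address, then merges runs where one block ends exactly where the next begins. A self-contained sketch of that idea, using a simplified Block type rather than the GC's own:

#include <stdlib.h>

typedef struct { char *start; long len; } Block;

static int cmp_start(const void *a, const void *b) {
  unsigned long x = (unsigned long)((const Block *)a)->start;
  unsigned long y = (unsigned long)((const Block *)b)->start;
  return (x < y) ? -1 : 1;
}

/* Merge adjacent blocks in place; returns the new block count.
   Assumes all n entries are valid (the real cache also skips empties). */
static int collapse(Block *blocks, int n) {
  int i, j = 0;
  qsort(blocks, n, sizeof(Block), cmp_start);
  for (i = 1; i < n; i++) {
    if (blocks[j].start + blocks[j].len == blocks[i].start)
      blocks[j].len += blocks[i].len;   /* extend the previous run */
    else
      blocks[++j] = blocks[i];          /* start a new run */
  }
  return j + 1;
}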
inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok)
inline static void *alloc_cache_find_pages(FreeBlock *blockfree, size_t len, size_t alignment, int dirty_ok)
{
int i;
void *r;
Free_Block *blockfree = GC->blockfree;
/* Try an exact fit: */
for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
@@ -105,10 +103,10 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok)
return NULL;
}
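alloc_cache_find_pages scans first for an exact-length fit and only then carves a piece off a larger cached block. A reduced sketch of that two-pass search (reusing the hypothetical Block type from the earlier sketch; alignment and zeroing are omitted here):

static char *cache_take(Block *blocks, int n, long len) {
  int i;
  for (i = 0; i < n; i++) {            /* pass 1: exact fit */
    if (blocks[i].len == len) {
      char *r = blocks[i].start;
      blocks[i].start = NULL;
      blocks[i].len = 0;
      return r;
    }
  }
  for (i = 0; i < n; i++) {            /* pass 2: split a larger block */
    if (blocks[i].len > len) {
      char *r = blocks[i].start;
      blocks[i].start += len;
      blocks[i].len -= len;
      return r;
    }
  }
  return NULL;                         /* cache miss: fall back to the OS */
}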
static void return_mem_to_cache(void *p, size_t len, int zeroed)
static void alloc_cache_return_mem(VM *vm, void *p, size_t len, int zeroed)
{
int i;
Free_Block *blockfree = GC->blockfree;
FreeBlock *blockfree = vm->freeblocks;
/* Round up to nearest page: */
if (len & (page_size - 1))
@@ -144,30 +142,30 @@ static void return_mem_to_cache(void *p, size_t len, int zeroed)
}
/* Might help next time around: */
collapse_adjacent_pages();
alloc_cache_collapse_pages(blockfree);
os_vm_free_pages(p, len);
ACTUALLY_FREEING_PAGES(len);
vm_memory_allocated_dec(vm, len);
}
static void vm_free_pages(void *p, size_t len)
static void vm_free_pages(VM *vm, void *p, size_t len)
{
return_mem_to_cache(p, len, 0);
alloc_cache_return_mem(vm, p, len, 0);
}
static void vm_flush_freed_pages(void)
static void vm_flush_freed_pages(VM *vm)
{
int i;
Free_Block *blockfree = GC->blockfree;
FreeBlock *blockfree = vm->freeblocks;
collapse_adjacent_pages();
alloc_cache_collapse_pages(blockfree);
for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
if (blockfree[i].start) {
if (blockfree[i].age == BLOCKFREE_UNMAP_AGE) {
os_vm_free_pages(blockfree[i].start, blockfree[i].len);
ACTUALLY_FREEING_PAGES(blockfree[i].len);
vm_memory_allocated_dec(vm, blockfree[i].len);
blockfree[i].start = NULL;
blockfree[i].len = 0;
} else
@@ -186,9 +184,10 @@ static void vm_flush_freed_pages(void)
mechanism, but we do a bit of work to collapse adjacent pages in
the cache. */
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
{
void *r;
FreeBlock *blockfree = vm->freeblocks;
if (!page_size)
page_size = getpagesize();
@@ -198,7 +197,7 @@ static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
len += page_size - (len & (page_size - 1));
/* Something from the cache, perhaps? */
r = find_cached_pages(len, alignment, dirty_ok);
r = alloc_cache_find_pages(blockfree, len, alignment, dirty_ok);
if(!r) {
/* attempt to allocate from OS */
r = os_vm_alloc_pages(len + alignment);
@@ -217,15 +216,15 @@ static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
if (pre_extra == 0) {
/* Instead of actually unmapping, put it in the cache, and there's
a good chance we can use it next time: */
ACTUALLY_ALLOCATING_PAGES(extra);
return_mem_to_cache(real_r + len, extra, 1);
vm_memory_allocated_inc(vm, extra);
alloc_cache_return_mem(vm, real_r + len, extra, 1);
}
else { os_vm_free_pages(real_r + len, extra - pre_extra); }
}
r = real_r;
}
ACTUALLY_ALLOCATING_PAGES(len);
vm_memory_allocated_inc(vm, len);
}
return r;
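When the cache misses, vm_malloc_pages asks the OS for len + alignment bytes and then trims or recycles the slack so the returned pointer is aligned. The core of that trick as a standalone POSIX mmap sketch (assumes alignment is a power-of-two multiple of the page size; the real code returns the aligned tail to the cache instead of unmapping it):

#include <stdint.h>
#include <sys/mman.h>

static void *alloc_aligned_pages(size_t len, size_t alignment) {
  char *p = mmap(NULL, len + alignment, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return NULL;
  size_t pre = (uintptr_t)p & (alignment - 1);
  if (pre) {
    size_t shift = alignment - pre;
    munmap(p, shift);               /* drop the misaligned prefix */
    munmap(p + shift + len, pre);   /* drop the leftover tail */
    p += shift;
  } else {
    munmap(p + len, alignment);     /* already aligned: drop the whole tail */
  }
  return p;
}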

View File

@@ -66,13 +66,3 @@ typedef struct finalizer {
struct finalizer *left;
struct finalizer *right;
} Fnl;
typedef struct {
void *start;
long len;
short age;
short zeroed;
} Free_Block;
#define BLOCKFREE_CACHE_SIZE 96

View File

@@ -352,9 +352,6 @@ static MSet *sets[NUM_SETS]; /* First one is tagged, last one is atomic */
/********************* Statistics *********************/
static long page_allocations = 0;
static long page_reservations = 0;
#define ACTUALLY_ALLOCATING_PAGES(len) (page_reservations += len)
#define ACTUALLY_FREEING_PAGES(len) (page_reservations -= len)
static long memory_in_use, gc_threshold = GROW_ADDITION, max_memory_use;
static int prev_memory_in_use, memory_use_growth;
@@ -429,13 +426,13 @@ static int just_checking, the_size;
static void *malloc_pages(size_t len, size_t alignment)
{
page_allocations += len;
return vm_malloc_pages(len, alignment, 0);
return vm_malloc_pages(GC->vm, len, alignment, 0);
}
static void free_pages(void *p, size_t len)
{
page_allocations -= len;
vm_free_pages(p, len);
vm_free_pages(GC->vm, p, len);
}
/******************************************************************************/
@@ -449,6 +446,11 @@ void GC_set_stack_base(void *base)
stack_base = (unsigned long)base;
}
void CompactGC_initialize(CompactGC *gc) {
memset(gc, 0, sizeof(CompactGC));
gc->vm = vm_create();
}
void GC_init_type_tags(int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
{
weak_box_tag = weakbox;
@@ -2598,7 +2600,7 @@ static void free_unused_mpages()
}
}
vm_flush_freed_pages();
vm_flush_freed_pages(GC->vm);
}
void promote_all_ages()
@@ -4734,8 +4736,8 @@ void GC_dump_with_traces(int flags,
(long)FREE_LIST_DELTA,
(100.0 * FREE_LIST_DELTA) / memory_in_use);
GCPRINT(GCOUTF, "Mmap overhead: %ld (%.2f%%)\n",
page_reservations - memory_in_use + FREE_LIST_DELTA,
(100.0 * ((double)page_reservations - memory_in_use)) / memory_in_use);
vm_memory_allocated(GC->vm) - memory_in_use + FREE_LIST_DELTA,
(100.0 * ((double) vm_memory_allocated(GC->vm) - memory_in_use)) / memory_in_use);
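For concreteness, the overhead figure printed here is essentially reserved-minus-used memory (FREE_LIST_DELTA omitted in this sketch); a worked example with illustrative numbers, not from any real run:

#include <stdio.h>

int main(void) {
  long reserved = 12L * 1024 * 1024;  /* vm_memory_allocated(GC->vm) */
  long in_use   = 10L * 1024 * 1024;  /* memory_in_use */
  printf("Mmap overhead: %ld (%.2f%%)\n",
         reserved - in_use, (100.0 * (reserved - in_use)) / in_use);
  /* prints: Mmap overhead: 2097152 (20.00%) */
  return 0;
}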
#if KEEP_BACKPOINTERS
if (flags & GC_DUMP_SHOW_TRACE) {
@@ -4767,5 +4769,8 @@ void GC_dump(void)
void GC_free_all(void)
{
vm_flush_freed_pages(GC->vm);
vm_free(GC->vm);
free(GC);
}

View File

@@ -19,10 +19,5 @@ typedef struct CompactGC {
GC_Weak_Box *weak_boxes;
GC_Ephemeron *ephemerons;
int num_last_seen_ephemerons;
Free_Block *blockfree;
struct VM *vm;
} CompactGC;
void CompactGC_initialize(CompactGC *gc) {
memset(gc, 0, sizeof(CompactGC));
gc->blockfree = malloc(sizeof(Free_Block) *BLOCKFREE_CACHE_SIZE);
}

View File

@@ -178,9 +178,6 @@ inline static void check_used_against_max(size_t len)
}
}
#define ACTUALLY_ALLOCATING_PAGES(len) GC->actual_pages_size += len
#define ACTUALLY_FREEING_PAGES(len) GC->actual_pages_size -= len
#include "page_range.c"
#include "vm.c"
@@ -190,19 +187,19 @@ inline static void check_used_against_max(size_t len)
static void *malloc_pages(size_t len, size_t alignment)
{
check_used_against_max(len);
return vm_malloc_pages(len, alignment, 0);
return vm_malloc_pages(GC->vm, len, alignment, 0);
}
static void *malloc_dirty_pages(size_t len, size_t alignment)
{
check_used_against_max(len);
return vm_malloc_pages(len, alignment, 1);
return vm_malloc_pages(GC->vm, len, alignment, 1);
}
static void free_pages(void *p, size_t len)
{
GC->used_pages -= (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1);
vm_free_pages(p, len);
vm_free_pages(GC->vm, p, len);
}
@@ -1324,6 +1321,23 @@ void GC_write_barrier(void *p)
#include "sighand.c"
void NewGC_initialize(NewGC *newgc) {
memset(newgc, 0, sizeof(NewGC));
newgc->mark_table = malloc(NUMBER_OF_TAGS * sizeof (Mark_Proc));
newgc->fixup_table = malloc(NUMBER_OF_TAGS * sizeof (Fixup_Proc));
#ifdef SIXTY_FOUR_BIT_INTEGERS
newgc->page_maps = malloc(PAGEMAP64_LEVEL1_SIZE * sizeof (mpage***));
#else
newgc->page_maps = malloc(PAGEMAP32_SIZE * sizeof (mpage*));
#endif
newgc->vm = vm_create();
newgc->protect_range = malloc(sizeof(Page_Range));
newgc->generations_available = 1;
newgc->last_full_mem_use = (20 * 1024 * 1024);
newgc->new_btc_mark = 1;
}
void GC_init_type_tags(int count, int pair, int mutable_pair, int weakbox, int ephemeron, int weakarray, int custbox)
{
static int initialized = 0;
@@ -1797,7 +1811,7 @@ void GC_dump_with_traces(int flags,
GCWARN((GCOUTF,"Peak memory use after a collection: %li\n", GC->peak_memory_use));
GCWARN((GCOUTF,"Allocated (+reserved) page sizes: %li (+%li)\n",
GC->used_pages * APAGE_SIZE,
GC->actual_pages_size - (GC->used_pages * APAGE_SIZE)));
vm_memory_allocated(GC->vm) - (GC->used_pages * APAGE_SIZE)));
GCWARN((GCOUTF,"# of major collections: %li\n", GC->num_major_collects));
GCWARN((GCOUTF,"# of minor collections: %li\n", GC->num_minor_collects));
GCWARN((GCOUTF,"# of installed finalizers: %i\n", GC->num_fnls));
@@ -2411,7 +2425,7 @@ static void garbage_collect(int force_full)
protect_old_pages();
TIME_STEP("protect");
if (gc->gc_full)
vm_flush_freed_pages();
vm_flush_freed_pages(gc->vm);
reset_finalizer_tree();
TIME_STEP("reset");
@@ -2422,8 +2436,8 @@ static void garbage_collect(int force_full)
gc->no_further_modifications = 0;
/* If we have too many idle pages, flush: */
if (gc->actual_pages_size > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) {
vm_flush_freed_pages();
if (vm_memory_allocated(gc->vm) > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) {
vm_flush_freed_pages(gc->vm);
}
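The replacement test keeps the same heuristic: flush the free-page cache once reserved memory exceeds twice the in-use page total (the extra shift by one doubles used_pages * APAGE_SIZE). A standalone sketch; LOG_APAGE_SIZE = 14 (16 KB pages) is an assumption for illustration, not taken from this diff:

#include <stdio.h>

#define LOG_APAGE_SIZE 14  /* assumed allocator page size: 16 KB */

static int should_flush(unsigned long reserved, unsigned long used_pages) {
  /* used_pages << (LOG_APAGE_SIZE + 1) == 2 * used_pages * APAGE_SIZE */
  return reserved > (used_pages << (LOG_APAGE_SIZE + 1));
}

int main(void) {
  /* 16 pages in use = 256 KB; threshold = 512 KB; 1 MB reserved -> flush */
  printf("%d\n", should_flush(1UL << 20, 16));
  return 0;
}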
/* update some statistics */
@@ -2558,5 +2572,7 @@ void GC_free_all(void)
}
}
vm_flush_freed_pages();
vm_flush_freed_pages(GC->vm);
vm_free(GC->vm);
free(GC);
}

View File

@@ -171,22 +171,6 @@ typedef struct NewGC {
GC_Weak_Box *weak_boxes;
GC_Ephemeron *ephemerons;
int num_last_seen_ephemerons;
Free_Block *blockfree;
} NewGC;
struct VM *vm;
void NewGC_initialize(NewGC *newgc) {
memset(newgc, 0, sizeof(NewGC));
newgc->mark_table = malloc(NUMBER_OF_TAGS * sizeof (Mark_Proc));
newgc->fixup_table = malloc(NUMBER_OF_TAGS * sizeof (Fixup_Proc));
#ifdef SIXTY_FOUR_BIT_INTEGERS
newgc->page_maps = malloc(PAGEMAP64_LEVEL1_SIZE * sizeof (mpage***));
#else
newgc->page_maps = malloc(PAGEMAP32_SIZE * sizeof (mpage*));
#endif
newgc->blockfree = malloc(sizeof(Free_Block) * BLOCKFREE_CACHE_SIZE);
newgc->protect_range = malloc(sizeof(Page_Range));
newgc->generations_available = 1;
newgc->last_full_mem_use = (20 * 1024 * 1024);
newgc->new_btc_mark = 1;
}
} NewGC;

View File

@@ -8,6 +8,59 @@
# define GCOUTF stderr
#endif
static inline size_t vm_round_up_to_page_size(size_t len, size_t page_size) {
len += (page_size -1) - (len & (page_size - 1));
return len;
};
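Page-size rounding is normally written as a mask operation when page_size is a power of two; a standalone reference version of the arithmetic, with a couple of checks:

#include <assert.h>
#include <stddef.h>

static size_t round_up_to_page(size_t len, size_t page_size) {
  return (len + page_size - 1) & ~(page_size - 1);
}

int main(void) {
  assert(round_up_to_page(1, 4096) == 4096);
  assert(round_up_to_page(4096, 4096) == 4096);  /* aligned input unchanged */
  assert(round_up_to_page(4097, 4096) == 8192);
  return 0;
}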
#if !( defined(_WIN32) || defined(OSKIT) )
typedef struct {
void *start;
long len;
short age;
short zeroed;
} FreeBlock;
#endif
typedef struct VM {
#if !( defined(_WIN32) || defined(OSKIT) )
FreeBlock *freeblocks;
#endif
size_t memory_allocated;
} VM;
static VM *vm_create() {
VM *vm = malloc(sizeof(VM));
memset(vm, 0, sizeof(VM));
#if !( defined(_WIN32) || defined(OSKIT) )
#define BLOCKFREE_CACHE_SIZE 96
vm->freeblocks = malloc(sizeof(FreeBlock) * BLOCKFREE_CACHE_SIZE);
#endif
return vm;
}
static void vm_free(VM *vm) {
#if !( defined(_WIN32) || defined(OSKIT) )
free(vm->freeblocks);
#endif
free(vm);
}
static size_t vm_memory_allocated(VM *vm) {
return vm->memory_allocated;
}
static size_t vm_memory_allocated_inc(VM *vm, size_t len) {
vm->memory_allocated += len;
return vm->memory_allocated;
}
static size_t vm_memory_allocated_dec(VM *vm, size_t len) {
vm->memory_allocated -= len;
return vm->memory_allocated;
}
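A minimal usage sketch of the accounting API defined above, assuming the VM definitions (and their stdlib/string includes) are in scope:

#include <stdio.h>

int main(void) {
  VM *vm = vm_create();
  vm_memory_allocated_inc(vm, 2 * 4096);
  vm_memory_allocated_dec(vm, 4096);
  printf("%lu\n", (unsigned long)vm_memory_allocated(vm));  /* 4096 */
  vm_free(vm);
  return 0;
}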
#if _WIN32 /* Windows */
# include "vm_win.c"
#elif defined(OSKIT) /* OSKit */

View File

@@ -3,8 +3,6 @@
posix_memalign-based allocator
determine_max_heap_size() (uses rlimit_heapsize.c)
Requires:
ACTUALLY_ALLOCATING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
@@ -16,7 +14,7 @@
static int page_size; /* OS page size */
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
{
void *r;
@@ -33,20 +31,20 @@ static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
return NULL;
}
ACTUALLY_ALLOCATING_PAGES(len);
vm_memory_allocated_inc(vm, len);
if(!dirty_ok)
memset(p, 0, len);
return r;
}
static void vm_free_pages(void *p, size_t len)
static void vm_free_pages(VM *vm, void *p, size_t len)
{
ACTUALLY_FREEING_PAGES(len);
vm_memory_allocated_dec(vm, len);
free(p);
}
static void vm_flush_freed_pages(void)
static void vm_flush_freed_pages(VM *vm)
{
}
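The memalign variant keeps no page cache at all; each request maps directly onto posix_memalign. A self-contained sketch of that strategy (hypothetical name; alignment must be a power-of-two multiple of sizeof(void *)):

#define _POSIX_C_SOURCE 200112L  /* for posix_memalign */
#include <stdlib.h>
#include <string.h>

static void *memalign_pages(size_t len, size_t alignment, int dirty_ok) {
  void *p = NULL;
  if (posix_memalign(&p, alignment, len))  /* nonzero result means failure */
    return NULL;
  if (!dirty_ok)
    memset(p, 0, len);                     /* callers may expect zeroed pages */
  return p;
}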

View File

@@ -3,15 +3,13 @@
allocator
determine_max_heap_size()
Requires:
ACTUALLY_ALLOCATING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
#include <oskit/c/malloc.h>
inline static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
inline static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
{
void *p;
@@ -20,18 +18,18 @@ inline static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
if (!dirty_ok)
memset(p, 0, len);
ACTUALLY_ALLOCATING_PAGES(len);
vm_memory_allocated_inc(vm, len);
return p;
}
static void vm_free_pages(void *p, size_t len)
static void vm_free_pages(VM *vm, void *p, size_t len)
{
vm_memory_allocated_dec(vm, len);
sfree(p, len);
ACTUALLY_FREEING_PAGES(len);
}
static void vm_flush_freed_pages(void)
static void vm_flush_freed_pages(VM *vm)
{
}

View File

@@ -3,8 +3,6 @@
allocator
determine_max_heap_size()
Requires:
ACTUALLY_ALLOCATING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
@@ -22,7 +20,7 @@ typedef struct {
static alloc_cache_entry cache[2][CACHE_SLOTS];
#endif
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
{
#if CACHE_SLOTS
{
@@ -44,7 +42,7 @@ static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
}
#endif
ACTUALLY_ALLOCATING_PAGES(len);
vm_memory_allocated_inc(vm, len);
/* VirtualAlloc MEM_COMMIT always zeros memory */
return (void *)VirtualAlloc(NULL, len,
@@ -52,7 +50,7 @@ static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
PAGE_READWRITE);
}
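On Windows the OS call itself hands back zeroed, page-aligned memory, so the fresh-allocation path needs no explicit memset. A reduced sketch of that path (not this file's code; the MEM_COMMIT | MEM_RESERVE flag combination is an assumption, as the flags argument is truncated in the hunk above):

#include <stddef.h>
#include <windows.h>

static void *win_alloc_pages(size_t len) {
  /* MEM_COMMIT pages are zero-filled by the kernel */
  return VirtualAlloc(NULL, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void win_free_pages(void *p) {
  VirtualFree(p, 0, MEM_RELEASE);  /* size must be 0 with MEM_RELEASE */
}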
static void vm_free_pages(void *p, size_t len)
static void vm_free_pages(VM *vm, void *p, size_t len)
{
#if CACHE_SLOTS
@@ -71,12 +69,11 @@ static void vm_free_pages(void *p, size_t len)
}
#endif
ACTUALLY_FREEING_PAGES(len);
vm_memory_allocated_dec(vm, len);
VirtualFree(p, 0, MEM_RELEASE);
}
static void vm_flush_freed_pages(void)
static void vm_flush_freed_pages(VM *vm)
{
#if CACHE_SLOTS
int i;
@@ -86,7 +83,7 @@ static void vm_flush_freed_pages(void)
if (cache[1][i].len) {
for (p = cache[1][i].page; p; p = next) {
next = *(void **)p;
ACTUALLY_FREEING_PAGES(cache[i].len);
vm_memory_allocated_dec(vm, cache[i].len);
VirtualFree(p, 0, MEM_RELEASE);
}
}
@@ -112,6 +109,7 @@ typedef BOOL (WINAPI * QueryInformationJobObject_Proc)(HANDLE hJob,
LPVOID lpJobObjectInfo,
DWORD cbJobObjectInfoLength,
LPDWORD lpReturnLength);
static size_type determine_max_heap_size(void)
{
JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;