New mmu block layer to avoid unneeded mprotects
This commit is contained in:
parent f73d63e1af
commit 41d1daf53c
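In outline, this commit replaces the old per-platform "VM" layer with an MMU layer whose block cache lets page-protection changes be queued and flushed per block rather than issued page by page. Below is a minimal sketch of how a caller in the collector might drive the new interface; mmu_alloc_page, mmu_queue_write_protect_range, mmu_flush_write_protect_ranges, MMU_SMALL_GEN1 and the mpage fields come from the diffs that follow, while the two wrapper functions are invented for illustration.

/* Illustrative sketch only; not code from this commit. */
static void *gc_alloc_small_gen1_page(MMU *mmu, void **src_block)
{
  /* dirty memory is acceptable; the page will be mprotected later */
  return mmu_alloc_page(mmu, APAGE_SIZE, APAGE_SIZE,
                        /* dirty */ 1, MMU_SMALL_GEN1,
                        /* expect_mprotect */ 1, src_block);
}

static void gc_write_protect_old_pages(MMU *mmu, mpage *pages)
{
  mpage *p;
  /* queue every page first; nothing is mprotected yet */
  for (p = pages; p; p = p->next)
    mmu_queue_write_protect_range(mmu, p->addr, APAGE_SIZE,
                                  MMU_SMALL_GEN1, &p->mmu_src_block);
  /* one flush protects whole blocks instead of one mprotect per page */
  mmu_flush_write_protect_ranges(mmu);
}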
@@ -323,6 +323,7 @@ main.@LTO@: $(XSRCDIR)/main.c
gc2.@LTO@: \
$(srcdir)/alloc_cache.c \
$(srcdir)/block_cache.c \
$(srcdir)/backtrace.c \
$(srcdir)/commongc_internal.h \
$(srcdir)/platforms.h \
@@ -351,7 +352,6 @@ gc2.@LTO@: \
$(srcdir)/newgc.c \
$(srcdir)/newgc.h \
$(srcdir)/page_range.c \
$(srcdir)/protect_range.c \
$(srcdir)/rlimit_heapsize.c \
$(srcdir)/roots.c \
$(srcdir)/stack_comp.c \
@@ -1,43 +1,42 @@
|
|||
/*
|
||||
Provides:
|
||||
vm_malloc_pages --- usual interface
|
||||
vm_free_pages --- usual interface
|
||||
vm_flush_freed_pages --- usual interface
|
||||
static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty)
|
||||
static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
|
||||
static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff)
|
||||
Requires (defined earlier):
|
||||
page_size --- in bytes
|
||||
my_qsort --- possibly from my_qsort.c
|
||||
static void os_vm_free_pages(void *p, size_t len);
|
||||
static void *os_vm_alloc_pages(size_t len);
|
||||
*/
|
||||
|
||||
/* interface to GC */
|
||||
/*
|
||||
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok);
|
||||
static void vm_free_pages(void *p, size_t len);
|
||||
static void vm_flush_freed_pages(void);
|
||||
static void vm_protect_pages(void *p, size_t len, int writable);
|
||||
*/
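For orientation, a small usage sketch of the alloc-cache entry points listed above. The wrapper function and local names are invented for illustration; in the real code the ssize_t deltas are accumulated into the MMU's memory_allocated counter.

static void example_alloc_cache_use(void)   /* illustrative only */
{
  ssize_t delta = 0;
  AllocCacheBlock *ac = alloc_cache_create();

  /* allocate two pages, APAGE_SIZE-aligned; dirty memory is fine */
  void *p = alloc_cache_alloc_page(ac, 2 * APAGE_SIZE, APAGE_SIZE, 1, &delta);

  /* give them back; they stay cached instead of being munmapped */
  delta += alloc_cache_free_page(ac, (char *)p, 2 * APAGE_SIZE, /* dirty */ 1);

  /* aged entries are eventually returned to the OS; the (negative)
     size change is reported back through the return value */
  delta += alloc_cache_flush_freed_pages(ac);
}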
|
||||
/* Controls how often freed pages are actually returned to OS: */
|
||||
#define BLOCKFREE_UNMAP_AGE 3
|
||||
|
||||
/* interface to OS */
|
||||
/*
|
||||
static void os_vm_free_pages(void *p, size_t len);
|
||||
static void *os_vm_alloc_pages(size_t len);
|
||||
*/
|
||||
#define BLOCKFREE_UNMAP_AGE 1
|
||||
/* Controls size of the cache */
|
||||
#define BLOCKFREE_CACHE_SIZE 3000
|
||||
|
||||
static int free_block_compare(const void *a, const void *b)
|
||||
/* Controls how many extra pages are requested from OS at a time: */
|
||||
#define CACHE_SEED_PAGES 16
|
||||
|
||||
static AllocCacheBlock *alloc_cache_create() {
|
||||
return ofm_malloc_zero(sizeof(AllocCacheBlock) * BLOCKFREE_CACHE_SIZE);
|
||||
}
|
||||
|
||||
static int alloc_cache_block_compare(const void *a, const void *b)
|
||||
{
|
||||
if ((unsigned long)((FreeBlock *)a)->start < (unsigned long)((FreeBlock *)b)->start)
|
||||
if ((unsigned long)((AllocCacheBlock *)a)->start < (unsigned long)((AllocCacheBlock *)b)->start)
|
||||
return -1;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void alloc_cache_collapse_pages(FreeBlock *blockfree)
|
||||
static void alloc_cache_collapse_pages(AllocCacheBlock *blockfree)
|
||||
{
|
||||
int i;
|
||||
int j;
|
||||
|
||||
/* sort by FreeBlock->start */
|
||||
my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(FreeBlock), free_block_compare);
|
||||
/* sort by AllocCacheBlock->start */
|
||||
my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(AllocCacheBlock), alloc_cache_block_compare);
|
||||
|
||||
/* collapse adjacent: */
|
||||
j = 0;
|
||||
|
@@ -53,7 +52,7 @@ static void alloc_cache_collapse_pages(FreeBlock *blockfree)
|
|||
}
|
||||
}
|
||||
|
||||
inline static void *alloc_cache_find_pages(FreeBlock *blockfree, size_t len, size_t alignment, int dirty_ok)
|
||||
inline static void *alloc_cache_find_pages(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok)
|
||||
{
|
||||
int i;
|
||||
void *r;
|
||||
|
@@ -103,14 +102,9 @@ inline static void *alloc_cache_find_pages(FreeBlock *blockfree, size_t len, siz
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void alloc_cache_return_mem(VM *vm, char *p, size_t len, int zeroed)
|
||||
static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty)
|
||||
{
|
||||
int i;
|
||||
FreeBlock *blockfree = vm->freeblocks;
|
||||
|
||||
/* Round up to nearest page: */
|
||||
if (len & (page_size - 1))
|
||||
len += page_size - (len & (page_size - 1));
|
||||
|
||||
/* Try to free pages in larger blocks, since the OS may be slow. */
|
||||
|
||||
|
@@ -118,16 +112,16 @@ static void alloc_cache_return_mem(VM *vm, char *p, size_t len, int zeroed)
|
|||
if(blockfree[i].start && (blockfree[i].len < (1024 * 1024))) {
|
||||
if (p == blockfree[i].start + blockfree[i].len) {
|
||||
blockfree[i].len += len;
|
||||
if (!zeroed)
|
||||
if (dirty)
|
||||
blockfree[i].zeroed = 0;
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
if (p + len == blockfree[i].start) {
|
||||
blockfree[i].start = p;
|
||||
blockfree[i].len += len;
|
||||
if (!zeroed)
|
||||
if (dirty)
|
||||
blockfree[i].zeroed = 0;
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -136,42 +130,36 @@ static void alloc_cache_return_mem(VM *vm, char *p, size_t len, int zeroed)
|
|||
blockfree[i].start = p;
|
||||
blockfree[i].len = len;
|
||||
blockfree[i].age = 0;
|
||||
blockfree[i].zeroed = zeroed;
|
||||
return;
|
||||
blockfree[i].zeroed = !dirty;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Might help next time around: */
|
||||
alloc_cache_collapse_pages(blockfree);
|
||||
|
||||
os_vm_free_pages(p, len);
|
||||
|
||||
vm_memory_allocated_dec(vm, len);
|
||||
os_free_pages(p, len);
|
||||
return -len;
|
||||
}
|
||||
|
||||
static void vm_free_pages(VM *vm, void *p, size_t len)
|
||||
{
|
||||
alloc_cache_return_mem(vm, p, len, 0);
|
||||
}
|
||||
|
||||
static void vm_flush_freed_pages(VM *vm)
|
||||
static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
|
||||
{
|
||||
int i;
|
||||
FreeBlock *blockfree = vm->freeblocks;
|
||||
|
||||
ssize_t freed = 0;
|
||||
alloc_cache_collapse_pages(blockfree);
|
||||
|
||||
for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
|
||||
if (blockfree[i].start) {
|
||||
if (blockfree[i].age == BLOCKFREE_UNMAP_AGE) {
|
||||
os_vm_free_pages(blockfree[i].start, blockfree[i].len);
|
||||
vm_memory_allocated_dec(vm, blockfree[i].len);
|
||||
os_free_pages(blockfree[i].start, blockfree[i].len);
|
||||
freed -= blockfree[i].len;
|
||||
blockfree[i].start = NULL;
|
||||
blockfree[i].len = 0;
|
||||
} else
|
||||
blockfree[i].age++;
|
||||
}
|
||||
}
|
||||
return freed;
|
||||
}
|
||||
|
||||
/* Instead of immediately freeing pages with munmap---only to mmap
|
||||
|
@@ -184,47 +172,41 @@ static void vm_flush_freed_pages(VM *vm)
|
|||
mechanism, but we do a bit of work to collapse adjacent pages in
|
||||
the cache. */
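Concretely, with the new BLOCKFREE_UNMAP_AGE of 1, the lifecycle of a freed page looks roughly like this (a worked timeline, not code from the commit):

/* free:       alloc_cache_free_page(ac, p, len, dirty)
               -> p is recorded in blockfree[] with age 0; no munmap yet
   1st flush:  alloc_cache_flush_freed_pages(ac)
               -> age (0) != BLOCKFREE_UNMAP_AGE, so the entry only ages
                  to 1 and the pages remain available for reuse
   2nd flush:  alloc_cache_flush_freed_pages(ac)
               -> age == BLOCKFREE_UNMAP_AGE, so os_free_pages() finally
                  runs and -len flows back through the return value      */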
|
||||
|
||||
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
||||
static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff)
|
||||
{
|
||||
char *r;
|
||||
FreeBlock *blockfree = vm->freeblocks;
|
||||
|
||||
if (!page_size)
|
||||
page_size = getpagesize();
|
||||
|
||||
/* Round up to nearest page: */
|
||||
if (len & (page_size - 1))
|
||||
len += page_size - (len & (page_size - 1));
|
||||
|
||||
/* Something from the cache, perhaps? */
|
||||
r = alloc_cache_find_pages(blockfree, len, alignment, dirty_ok);
|
||||
if(!r) {
|
||||
/* attempt to allocate from OS */
|
||||
r = os_vm_alloc_pages(len + alignment);
|
||||
size_t extra = alignment + CACHE_SEED_PAGES * APAGE_SIZE;
|
||||
r = os_alloc_pages(len + extra);
|
||||
if(r == (void *)-1) { return NULL; }
|
||||
|
||||
if (alignment) {
|
||||
/* We allocated too large so we can choose the alignment. */
|
||||
size_t extra = alignment;
|
||||
char *real_r = (char*)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
|
||||
long pre_extra = real_r - r;
|
||||
|
||||
/* in front extra */
|
||||
if (pre_extra) { os_vm_free_pages(r, pre_extra); }
|
||||
if (pre_extra) {
|
||||
/* printf("FREEING FRONT %p %lx\n", r, pre_extra); */
|
||||
os_free_pages(r, pre_extra); }
|
||||
/* in back extra exists */
|
||||
if (pre_extra < extra) {
|
||||
if (pre_extra == 0) {
|
||||
/* Instead of actually unmapping, put it in the cache, and there's
|
||||
a good chance we can use it next time: */
|
||||
vm_memory_allocated_inc(vm, extra);
|
||||
alloc_cache_return_mem(vm, real_r + len, extra, 1);
|
||||
(*size_diff) += extra;
|
||||
(*size_diff) += alloc_cache_free_page(blockfree, real_r + len, extra, 1);
|
||||
}
|
||||
else { os_vm_free_pages(real_r + len, extra - pre_extra); }
|
||||
else { os_free_pages(real_r + len, extra - pre_extra); }
|
||||
}
|
||||
r = real_r;
|
||||
}
|
||||
|
||||
vm_memory_allocated_inc(vm, len);
|
||||
(*size_diff) += extra;
|
||||
}
|
||||
|
||||
return r;
|
||||
|
|
371
src/racket/gc2/block_cache.c
Normal file
@@ -0,0 +1,371 @@
|
|||
/******************************************************************************/
|
||||
/* OS-specific low-level allocator */
|
||||
/******************************************************************************/
|
||||
#include "gclist.h"
|
||||
/* requires: */
|
||||
static void *os_alloc_pages(size_t len);
|
||||
static void os_free_pages(void *p, size_t len);
|
||||
static void os_protect_pages(void *p, size_t len, int writable);
|
||||
|
||||
#define BC_BLOCK_SIZE (1 << 24) /* 16 MB */
|
||||
|
||||
struct block_desc;
|
||||
static AllocCacheBlock *alloc_cache_create();
|
||||
static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty);
|
||||
static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree);
|
||||
static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
|
||||
|
||||
static Page_Range *page_range_create();
|
||||
static void page_range_flush(Page_Range *pr, int writeable);
|
||||
static void page_range_add(Page_Range *pr, void *_start, unsigned long len, int writeable);
|
||||
|
||||
|
||||
#ifdef BC_ASSERTS
|
||||
static int block_cache_chain_stat(GCList *head, int *bcnt);
|
||||
#endif
|
||||
|
||||
struct block_group;
|
||||
typedef struct block_desc {
|
||||
GCList gclist;
|
||||
void *block;
|
||||
void *free;
|
||||
long size;
|
||||
long used;
|
||||
long totalcnt;
|
||||
long freecnt;
|
||||
struct block_group *group;
|
||||
} block_desc;
|
||||
|
||||
typedef struct block_group {
|
||||
GCList full;
|
||||
GCList free;
|
||||
int atomic;
|
||||
} block_group;
|
||||
|
||||
typedef struct BlockCache {
|
||||
block_group atomic;
|
||||
block_group non_atomic;
|
||||
AllocCacheBlock *bigBlockCache;
|
||||
Page_Range *page_range;
|
||||
MMU *mmu;
|
||||
} BlockCache;
|
||||
|
||||
typedef struct pfree_list {
|
||||
void *next;
|
||||
int dirty;
|
||||
} pfree_list;
|
||||
|
||||
static BlockCache* block_cache_create(MMU *mmu) {
|
||||
BlockCache *bc = ofm_malloc_zero(sizeof(BlockCache));
|
||||
gclist_init(&bc->atomic.full);
|
||||
gclist_init(&bc->atomic.free);
|
||||
bc->atomic.atomic = 1;
|
||||
gclist_init(&bc->non_atomic.full);
|
||||
gclist_init(&bc->non_atomic.free);
|
||||
bc->atomic.atomic = 0;
|
||||
bc->bigBlockCache = alloc_cache_create();
|
||||
bc->page_range = page_range_create();
|
||||
bc->mmu = mmu;
|
||||
return bc;
|
||||
}
|
||||
|
||||
static void block_cache_free(BlockCache* bc) {
|
||||
free(bc);
|
||||
}
|
||||
|
||||
static block_desc *bc_alloc_std_block(block_group *bg) {
|
||||
void *r = os_alloc_pages(BC_BLOCK_SIZE);
|
||||
void *ps = align_up_ptr(r, APAGE_SIZE);
|
||||
|
||||
block_desc *bd = (block_desc*) ofm_malloc(sizeof(block_desc));
|
||||
bd->block = r;
|
||||
bd->free = ps;
|
||||
bd->size = BC_BLOCK_SIZE;
|
||||
bd->used = 0;
|
||||
bd->group = bg;
|
||||
gclist_init(&bd->gclist);
|
||||
|
||||
/* printf("ALLOC BLOCK %p-%p size %li %li %li %p\n", bd->block, bd->block + bd->size, bd->size, APAGE_SIZE, bd->size / APAGE_SIZE, bd->free); */
|
||||
/* free unaligned portion */
|
||||
{
|
||||
long diff = ps -r;
|
||||
if (diff) {
|
||||
long enddiff = APAGE_SIZE - diff;
|
||||
os_free_pages(r, diff);
|
||||
os_free_pages(r + BC_BLOCK_SIZE - enddiff, enddiff);
|
||||
bd->block = ps;
|
||||
bd->size = BC_BLOCK_SIZE - APAGE_SIZE;
|
||||
/* printf("UNALIGNED FROM OS %p %li %li\n", r, diff, enddiff); */
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* setup free list of APAGE_SIZE sized pages inside block */
|
||||
{
|
||||
int i = 0;
|
||||
pfree_list *pe = (bd->block + bd->size);
|
||||
pfree_list *p = ps;
|
||||
pfree_list *n;
|
||||
while(p < pe) {
|
||||
n = ((void*) p) + APAGE_SIZE;
|
||||
p->next = n;
|
||||
p->dirty = 0;
|
||||
p = n;
|
||||
i++;
|
||||
}
|
||||
bd->totalcnt = i;
|
||||
bd->freecnt = i;
|
||||
if (p > pe) { p = (p - (2 * APAGE_SIZE)); }
|
||||
else { p = (p - APAGE_SIZE); }
|
||||
p->next = NULL;
|
||||
/* printf("ENDUP %p %p %p %i\n", n, p, p->next, i); */
|
||||
}
|
||||
|
||||
return bd;
|
||||
}
|
||||
|
||||
static void *bc_alloc_std_page(BlockCache *bc, int dirty_ok, int expect_mprotect, void **src_block, ssize_t *size_diff) {
|
||||
block_group *bg = (expect_mprotect ? &bc->non_atomic : &bc->atomic);
|
||||
GCList *free_head = &bg->free;
|
||||
int newbl = 0;
|
||||
|
||||
tryagain:
|
||||
if (!gclist_is_empty(free_head)) {
|
||||
if (!gclist_first_item(free_head, block_desc*, gclist)->free) {
|
||||
gclist_move(free_head->next, &bg->full);
|
||||
goto tryagain;
|
||||
}
|
||||
}
|
||||
else {
|
||||
newbl = 1;
|
||||
block_desc *bd = bc_alloc_std_block(bg);
|
||||
gclist_add(free_head, &(bd->gclist));
|
||||
(*size_diff) += bd->size;
|
||||
/* printf("ALLOC BLOCK %i %p %p-%p size %li %p\n", expect_mprotect, bg, bd->block, bd->block + bd->size, bd->size, bd->free); */
|
||||
}
|
||||
|
||||
{
|
||||
block_desc *bd = gclist_first_item(free_head, block_desc*, gclist);
|
||||
pfree_list *fl = bd->free;
|
||||
void *p = fl;
|
||||
bd->free = fl->next;
|
||||
bd->freecnt--;
|
||||
|
||||
*src_block = bd;
|
||||
if (expect_mprotect) {
|
||||
GC_MP_CNT_INC(mp_alloc_med_big_cnt);
|
||||
os_protect_pages(p, APAGE_SIZE, 1);
|
||||
}
|
||||
|
||||
if (!dirty_ok) {
|
||||
if (fl->dirty)
|
||||
memset(p, 0, APAGE_SIZE);
|
||||
else
|
||||
fl->next = 0;
|
||||
}
|
||||
|
||||
#if BC_ASSERTS
|
||||
assert(p >= bd->block);
|
||||
assert(p+APAGE_SIZE <= bd->block + bd->size);
|
||||
if (!bg->atomic)
|
||||
{
|
||||
int afub = 0;
|
||||
int afrb = 0;
|
||||
int nafub = 0;
|
||||
int nafrb = 0;
|
||||
int afu = block_cache_chain_stat(&bc->atomic.full, &afub);
|
||||
int afr = block_cache_chain_stat(&bc->atomic.free, &afrb);
|
||||
int nafu = block_cache_chain_stat(&bc->non_atomic.full, &nafub);
|
||||
int nafr = block_cache_chain_stat(&bc->non_atomic.free, &nafrb);
|
||||
printf("ALLOC PAGE %i %p %p-%p %03i %03i %04i %04i : %03i %03i %03i %03i %09i %s\n", expect_mprotect, bg, p, p + APAGE_SIZE, afu, afr, nafu, nafr, afub, afrb, nafub, nafrb, mmu_memory_allocated(bc->mmu), (newbl ? "NEW " : ""));
|
||||
}
|
||||
#endif
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t bc_free_std_block(block_desc *b) {
|
||||
ssize_t size_diff = 0;
|
||||
gclist_del(&b->gclist);
|
||||
os_free_pages(b->block, b->size);
|
||||
size_diff -= b->size;
|
||||
free(b);
|
||||
return size_diff;
|
||||
}
|
||||
|
||||
static void *block_cache_alloc_page(BlockCache* bc, size_t len, size_t alignment, int dirty, int type, int expect_mprotect, void **src_block, ssize_t *size_diff) {
|
||||
switch(type) {
|
||||
case MMU_SMALL_GEN1:
|
||||
return bc_alloc_std_page(bc, dirty, expect_mprotect, src_block, size_diff);
|
||||
break;
|
||||
default:
|
||||
*(char**)src_block = (char*) ~0x0;
|
||||
return alloc_cache_alloc_page(bc->bigBlockCache, len, APAGE_SIZE, dirty, size_diff);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#if BC_ASSERTS
|
||||
static int find_addr_in_bd(GCList *head, void *p, char* msg) {
|
||||
block_desc *b;
|
||||
gclist_each_item(b, head, block_desc*, gclist) {
|
||||
if (p >= b->block && p < b->block + b->size) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static ssize_t block_cache_free_page(BlockCache* bc, void *p, size_t len, int type, int expect_mprotect, void **src_block) {
|
||||
switch(type) {
|
||||
case MMU_SMALL_GEN1:
|
||||
{
|
||||
GCList *free_head = &((expect_mprotect ? &bc->non_atomic : &bc->atomic)->free);
|
||||
block_desc *b = (block_desc*)(*src_block);
|
||||
pfree_list *fl = p;
|
||||
fl->next = b->free;
|
||||
fl->dirty = 1;
|
||||
b->free = fl;
|
||||
#if BC_ASSERTS
|
||||
assert(*src_block != (char*)~0x0);
|
||||
assert(b->group == bg);
|
||||
#endif
|
||||
gclist_move(&b->gclist, free_head);
|
||||
b->freecnt++;
|
||||
#if BC_ASSERTS
|
||||
if (!bg->atomic)
|
||||
{
|
||||
int afub = 0;
|
||||
int afrb = 0;
|
||||
int nafub = 0;
|
||||
int nafrb = 0;
|
||||
int afu = block_cache_chain_stat(&bc->atomic.full, &afub);
|
||||
int afr = block_cache_chain_stat(&bc->atomic.free, &afrb);
|
||||
int nafu = block_cache_chain_stat(&bc->non_atomic.full, &nafub);
|
||||
int nafr = block_cache_chain_stat(&bc->non_atomic.free, &nafrb);
|
||||
printf("FREE PAGE %i %p %p-%p %03i %03i %04i %04i : %03i %03i %03i %03i %09i\n", expect_mprotect, bg, p, p + APAGE_SIZE, afu, afr, nafu, nafr, afub, afrb, nafub, nafrb, mmu_memory_allocated(bc->mmu));
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
#if BC_ASSERTS
|
||||
assert(!(find_addr_in_bd(&bc->atomic.full, p, "atomic full") ||
|
||||
find_addr_in_bd(&bc->atomic.free, p, "atomic freeblock") ||
|
||||
find_addr_in_bd(&bc->non_atomic.full, p, "non_atomic full") ||
|
||||
find_addr_in_bd(&bc->non_atomic.free, p, "non_atomic freeblock")));
|
||||
assert(*src_block == (char*)~0x0);
|
||||
#endif
|
||||
return alloc_cache_free_page(bc->bigBlockCache, p, len, MMU_DIRTY);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int sort_full_to_empty(void *priv, GCList *a, GCList *b) {
|
||||
block_desc *ba = gclist_item(a, block_desc*, gclist);
|
||||
block_desc *bb = gclist_item(b, block_desc*, gclist);
|
||||
|
||||
if ((ba->freecnt) <= (bb->freecnt)) {
|
||||
return -1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void block_cache_prep_for_compaction(BlockCache* bc) {
|
||||
gclist_sort(NULL, &bc->atomic.free, sort_full_to_empty);
|
||||
gclist_sort(NULL, &bc->non_atomic.free, sort_full_to_empty);
|
||||
#if 0
|
||||
{
|
||||
block_desc *b;
|
||||
gclist_each_item(b, &bc->atomic.free, block_desc*, gclist) {
|
||||
printf(" ATOMIC %05li %03li %p\n", b->freecnt, b->totalcnt, b); }
|
||||
gclist_each_item(b, &bc->non_atomic.free, block_desc*, gclist) {
|
||||
printf("NONATOMIC %03li %03li %p\n", b->freecnt, b->totalcnt, b);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static int block_cache_compact(void **src_block) {
|
||||
block_desc *b = *src_block;
|
||||
if (b->freecnt > (b->totalcnt/2)) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t block_cache_flush_freed_pages(BlockCache* bc) {
|
||||
block_desc *b;
|
||||
block_desc *bn;
|
||||
ssize_t size_diff = 0;
|
||||
ssize_t alloc_cache_size_diff = 0;
|
||||
|
||||
gclist_each_item_safe(b, bn, &bc->atomic.free, block_desc*, gclist) {
|
||||
if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b); }
|
||||
}
|
||||
gclist_each_item_safe(b, bn, &bc->non_atomic.free, block_desc*, gclist) {
|
||||
if (b->freecnt == b->totalcnt) { size_diff += bc_free_std_block(b); }
|
||||
}
|
||||
alloc_cache_size_diff = alloc_cache_flush_freed_pages(bc->bigBlockCache);
|
||||
|
||||
#ifdef GC_MP_CNT
|
||||
mp_bc_freed = -size_diff;
|
||||
mp_ac_freed = -alloc_cache_size_diff;
|
||||
#endif
|
||||
|
||||
return size_diff + alloc_cache_size_diff;
|
||||
}
|
||||
|
||||
static void block_cache_queue_protect_range(BlockCache* bc, void *p, size_t len, int type, int writeable, void **src_block) {
|
||||
switch(type) {
|
||||
case MMU_SMALL_GEN1:
|
||||
#if BC_ASSERTS
|
||||
assert(!(find_addr_in_bd(&bc->atomic.full, p, "atomic full") ||
|
||||
find_addr_in_bd(&bc->atomic.free, p, "atomic freeblock")));
|
||||
assert(find_addr_in_bd(&bc->non_atomic.full, p, "non_atomic full") ||
|
||||
find_addr_in_bd(&bc->non_atomic.free, p, "non_atomic freeblock"));
|
||||
assert(*src_block != (char*)~0x0);
|
||||
#endif
|
||||
return;
|
||||
break;
|
||||
default:
|
||||
#if BC_ASSERTS
|
||||
assert(*src_block == (char*)~0x0);
|
||||
#endif
|
||||
page_range_add(bc->page_range, p, len, writeable);
|
||||
return;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void block_cache_flush_protect_ranges(BlockCache* bc, int writeable) {
|
||||
block_group *bg = &bc->non_atomic;
|
||||
block_desc *b;
|
||||
gclist_each_item(b, &bg->full, block_desc*, gclist) {
|
||||
page_range_add(bc->page_range, b->block, b->size, writeable);
|
||||
}
|
||||
gclist_each_item(b, &bg->free, block_desc*, gclist) {
|
||||
page_range_add(bc->page_range, b->block, b->size, writeable);
|
||||
}
|
||||
|
||||
page_range_flush(bc->page_range, writeable);
|
||||
}
|
||||
|
||||
#if BC_ASSERTS
|
||||
static int block_cache_chain_stat(GCList *head, int *blcnt) {
|
||||
block_desc *b;
|
||||
int freecnt = 0;
|
||||
gclist_each_item(b, head, block_desc*, gclist) {
|
||||
pfree_list *fl;
|
||||
int lfcnt = 0;
|
||||
for (fl = b->free; fl; fl = fl->next) {
|
||||
lfcnt++;
|
||||
}
|
||||
freecnt += lfcnt;
|
||||
(*blcnt)++;
|
||||
}
|
||||
return freecnt;
|
||||
}
|
||||
#endif
|
183
src/racket/gc2/gclist.h
Normal file
@@ -0,0 +1,183 @@
|
|||
#ifndef GCLIST_H
|
||||
#define GCLIST_H
|
||||
|
||||
/* design taken from the Linux doubly-linked list implementation in include/linux/list.h */
|
||||
|
||||
typedef struct GCList {
|
||||
struct GCList *next;
|
||||
struct GCList *prev;
|
||||
} GCList;
|
||||
|
||||
#define GCLIST_HEAD(name) GCList name = { &(name), &(name) }
|
||||
|
||||
static inline void gclist_init(GCList *list) {
|
||||
list->next = list;
|
||||
list->prev = list;
|
||||
}
|
||||
|
||||
static inline void __gclist_add(GCList *item, GCList *prev, GCList *next) {
|
||||
next->prev = item;
|
||||
item->next = next;
|
||||
item->prev = prev;
|
||||
prev->next = item;
|
||||
}
|
||||
|
||||
static inline void gclist_add(GCList *head, GCList *item) {
|
||||
__gclist_add(item, head, head->next);
|
||||
}
|
||||
|
||||
static inline void gclist_add_tail(GCList *head, GCList *item) {
|
||||
__gclist_add(item, head->prev, head);
|
||||
}
|
||||
|
||||
static inline void __gclist_del(GCList *prev, GCList *next) {
|
||||
next->prev = prev;
|
||||
prev->next = next;
|
||||
}
|
||||
|
||||
#define GCLIST_POISON1 ((void *)(0x00100100))
|
||||
#define GCLIST_POISON2 ((void *)(0x00200200))
|
||||
static inline void gclist_del(GCList *item) {
|
||||
__gclist_del(item->prev, item->next);
|
||||
item->next = GCLIST_POISON1;
|
||||
item->prev = GCLIST_POISON2;
|
||||
}
|
||||
|
||||
static inline int gclist_is_last(GCList *head, GCList *list) {
|
||||
return list->next == head;
|
||||
}
|
||||
|
||||
static inline int gclist_is_empty(GCList *head) {
|
||||
return head->next == head;
|
||||
}
|
||||
|
||||
static inline void gclist_move(GCList *list, GCList *head) {
|
||||
__gclist_del(list->prev, list->next);
|
||||
gclist_add(head, list);
|
||||
}
|
||||
|
||||
static inline void gclist_move_tail(GCList *list, GCList *head) {
|
||||
__gclist_del(list->prev, list->next);
|
||||
gclist_add(head, list);
|
||||
}
|
||||
|
||||
static inline void __gclist_splice(GCList *item, GCList *prev, GCList *next) {
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline void gclist_splice(GCList *head, GCList *list) {
|
||||
if(!gclist_is_empty(list)) { __gclist_splice(list, head, head->next); }
|
||||
}
|
||||
|
||||
#define gclist_item(ptr, type, member) \
|
||||
((type) (((void*)(ptr)) - ((void *) (&(((type) 0x0)->member)))))
|
||||
|
||||
#define gclist_first_item(head, type, member) \
|
||||
gclist_item((head)->next, type, member)
|
||||
|
||||
#define gclist_each_item(pos, head, type, member) \
|
||||
for (pos = gclist_item((head)->next, type, member); \
|
||||
&pos->member != (head); \
|
||||
pos = gclist_item(pos->member.next, type, member))
|
||||
|
||||
#define gclist_each_item_safe(pos, n, head, type, member) \
|
||||
for (pos = gclist_item((head)->next, type, member), \
|
||||
n = gclist_item(pos->member.next, type, member); \
|
||||
&pos->member != (head); \
|
||||
pos = n, \
|
||||
n = gclist_item(n->member.next, type, member))
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/* merge sort */
|
||||
typedef int (*GCListCmp)(void *priv, GCList *a, GCList *b);
|
||||
#define MAX_LIST_LENGTH_BITS 20
|
||||
|
||||
static GCList *merge(void *priv, GCListCmp cmp, GCList *a, GCList *b) {
|
||||
GCList head;
|
||||
GCList *tail = &head;
|
||||
|
||||
while(a && b) {
|
||||
if ((*cmp)(priv, a, b) <= 0) {
|
||||
tail->next = a;
|
||||
a = a->next;
|
||||
}
|
||||
else {
|
||||
tail->next = b;
|
||||
b = b->next;
|
||||
}
|
||||
tail = tail->next;
|
||||
};
|
||||
|
||||
tail->next = a?:b;
|
||||
return head.next;
|
||||
}
|
||||
|
||||
static void merge_and_restore_back_links(void *priv, GCListCmp cmp, GCList *head, GCList *a, GCList *b) {
|
||||
GCList *tail = head;
|
||||
while(a && b) {
|
||||
if ((*cmp)(priv, a, b) <= 0) {
|
||||
tail->next = a;
|
||||
a->prev = tail;
|
||||
a = a->next;
|
||||
}
|
||||
else {
|
||||
tail->next = b;
|
||||
b->prev = tail;
|
||||
b = b->next;
|
||||
}
|
||||
tail = tail->next;
|
||||
}
|
||||
|
||||
tail->next = a?:b;
|
||||
|
||||
do {
|
||||
tail->next->prev = tail;
|
||||
tail = tail->next;
|
||||
} while(tail->next);
|
||||
|
||||
tail->next = head;
|
||||
head->prev = tail;
|
||||
}
|
||||
|
||||
static void gclist_sort(void *priv, GCList *head, GCListCmp cmp) {
|
||||
GCList *part[MAX_LIST_LENGTH_BITS+1];
|
||||
int level; /* index into part[] */
|
||||
int max_level = 0;
|
||||
GCList *list;
|
||||
|
||||
if (gclist_is_empty(head)) return;
|
||||
|
||||
memset(part, 0, sizeof(part));
|
||||
|
||||
head->prev->next = NULL; /* set end of list NULL */
|
||||
list = head->next; /* set list to first item in list */
|
||||
|
||||
while(list) {
|
||||
GCList *cur = list;
|
||||
list = list->next;
|
||||
cur->next = NULL;
|
||||
|
||||
for (level = 0; part[level]; level++) {
|
||||
cur = merge(priv, cmp, part[level], cur);
|
||||
part[level] = NULL;
|
||||
}
|
||||
if (level > max_level) {
|
||||
if (level > MAX_LIST_LENGTH_BITS) {
|
||||
printf("GCList is too long to sort");
|
||||
abort();
|
||||
}
|
||||
max_level = level;
|
||||
}
|
||||
part[level] = cur;
|
||||
}
|
||||
|
||||
for (level = 0; level < max_level; level ++) {
|
||||
if (part[level]) {
|
||||
list = merge(priv, cmp, part[level], list);
|
||||
}
|
||||
}
|
||||
|
||||
merge_and_restore_back_links(priv, cmp, head, part[max_level], list);
|
||||
}
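For orientation, a minimal sketch of how the GCList helpers above are used with an embedding struct. The element type here is invented; block_cache.c embeds GCList in block_desc in the same way.

typedef struct Item {            /* illustrative element type */
  GCList link;                   /* embedded list node */
  int key;
} Item;

static int cmp_items(void *priv, GCList *a, GCList *b) {
  return gclist_item(a, Item*, link)->key - gclist_item(b, Item*, link)->key;
}

static void example_gclist_use(Item *x, Item *y) {
  GCLIST_HEAD(items);                    /* an empty circular list */
  Item *it;

  gclist_add(&items, &x->link);
  gclist_add_tail(&items, &y->link);

  gclist_sort(NULL, &items, cmp_items);  /* bottom-up merge sort */

  gclist_each_item(it, &items, Item*, link) {
    /* visit items in key order */
  }
}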
|
File diff suppressed because it is too large
|
@@ -1,21 +1,32 @@
|
|||
#include "commongc_internal.h"
|
||||
#include "gc2_obj.h"
|
||||
|
||||
#if defined(MZ_USE_PLACES)
|
||||
/*
|
||||
# define GC_DEBUG_PAGES
|
||||
# define MASTER_ALLOC_DEBUG
|
||||
# define KILLING_DEBUG
|
||||
*/
|
||||
#endif
|
||||
|
||||
|
||||
typedef struct mpage {
|
||||
struct mpage *next;
|
||||
struct mpage *prev;
|
||||
void *addr;
|
||||
unsigned long previous_size; /* for med page, place to search for available block; for jit nursery, allocated size */
|
||||
unsigned long size; /* big page size, med page element size, or nursery starting point */
|
||||
unsigned char generation;
|
||||
/*
|
||||
unsigned char generation :1;
|
||||
unsigned char back_pointers :1;
|
||||
unsigned char big_page :2;
|
||||
unsigned char size_cless :2;
|
||||
unsigned char page_type :3;
|
||||
unsigned char marked_on :1;
|
||||
unsigned char has_new :1;
|
||||
unsigned char mprotected :1;
|
||||
unsigned char added :1;
|
||||
*/
|
||||
unsigned char generation ;
|
||||
unsigned char back_pointers ;
|
||||
unsigned char size_class ; /* 0 => small; 1 => med; 2 => big; 3 => big marked */
|
||||
unsigned char page_type ;
|
||||
|
@@ -24,7 +35,10 @@ typedef struct mpage {
|
|||
unsigned char mprotected ;
|
||||
unsigned char added ;
|
||||
unsigned short live_size;
|
||||
#ifdef MZ_GC_BACKTRACE
|
||||
void **backtrace;
|
||||
#endif
|
||||
void *mmu_src_block;
|
||||
} mpage;
|
||||
|
||||
typedef struct Gen0 {
|
||||
|
@@ -85,6 +99,13 @@ typedef struct Page_Range {
|
|||
unsigned long range_alloc_used;
|
||||
} Page_Range;
|
||||
|
||||
typedef struct {
|
||||
char *start;
|
||||
long len;
|
||||
short age;
|
||||
short zeroed;
|
||||
} AllocCacheBlock;
|
||||
|
||||
#ifdef MZ_USE_PLACES
|
||||
typedef struct NewGCMasterInfo {
|
||||
unsigned long size;
|
||||
|
@@ -111,7 +132,6 @@ typedef struct NewGC {
|
|||
PageMap page_maps;
|
||||
/* All non-gen0 pages are held in the following structure. */
|
||||
struct mpage *gen1_pages[PAGE_TYPES];
|
||||
Page_Range *protect_range;
|
||||
|
||||
struct mpage *med_pages[NUM_MED_PAGE_SIZES];
|
||||
struct mpage *med_freelist_pages[NUM_MED_PAGE_SIZES];
|
||||
|
@@ -209,6 +229,10 @@ typedef struct NewGC {
|
|||
GC_Weak_Box *weak_boxes;
|
||||
GC_Ephemeron *ephemerons;
|
||||
int num_last_seen_ephemerons;
|
||||
struct VM *vm;
|
||||
struct MMU *mmu;
|
||||
|
||||
#if defined(GC_DEBUG_PAGES)
|
||||
FILE *GCVERBOSEFH;
|
||||
#endif
|
||||
|
||||
} NewGC;
|
||||
|
|
|
@@ -1,11 +1,65 @@
|
|||
|
||||
/*
|
||||
Provides:
|
||||
initialize_page_ranges
|
||||
flush_page_ranges
|
||||
add_page_range
|
||||
/*
|
||||
Provides:
|
||||
page_range_initialize
|
||||
page_range_add
|
||||
page_range_flush
|
||||
Requires:
|
||||
os_protect_pages
|
||||
*/
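A short sketch of the queue-then-flush pattern these functions support. The parameters are placeholders; the real callers are the MMU layer and the block cache.

static void example_page_range_use(void *p1, void *p2)   /* illustrative only */
{
  Page_Range *pr = page_range_create();

  /* queue regions; adjacent ones are merged, and the queue is flushed
     early only if the splay-tree arena fills up */
  page_range_add(pr, p1, 4 * APAGE_SIZE, MMU_WRITE_PROTECTED);
  page_range_add(pr, p2, 2 * APAGE_SIZE, MMU_WRITE_PROTECTED);

  /* one flush issues one os_protect_pages() call per merged range */
  page_range_flush(pr, MMU_WRITE_PROTECTED);
}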
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
/* VirtualProtect can be used only on pages allocated at the same
|
||||
time, so we can't collapse ranges. */
|
||||
|
||||
# define initialize_protect_page_ranges(pr, b, s) /* */
|
||||
# define add_protect_page_range(pr, s, l, a, w) vm_protect_pages(s, l, w)
|
||||
# define flush_protect_page_ranges(pr, w) /* */
|
||||
|
||||
#else
|
||||
|
||||
static void page_range_compact(Page_Range *pr);
|
||||
static void page_range_reset(Page_Range *pr);
|
||||
static void page_range_flush(Page_Range *pr, int writeable);
|
||||
static int page_range_add_worker(Page_Range *pr, void *_start, unsigned long len);
|
||||
|
||||
|
||||
static Page_Range *page_range_create()
|
||||
{
|
||||
Page_Range *pr = ofm_malloc_zero(sizeof(Page_Range));
|
||||
pr->range_root = NULL;
|
||||
pr->range_start = NULL;
|
||||
pr->range_alloc_block = ofm_malloc(APAGE_SIZE);
|
||||
pr->range_alloc_size = APAGE_SIZE;
|
||||
pr->range_alloc_used = 0;
|
||||
return pr;
|
||||
}
|
||||
|
||||
static void page_range_add(Page_Range *pr, void *_start, unsigned long len, int writeable)
|
||||
{
|
||||
GC_MP_CNT_INC(mp_pr_add_cnt);
|
||||
if (!page_range_add_worker(pr, _start, len)) {
|
||||
GC_MP_CNT_INC(mp_pr_ff_cnt);
|
||||
page_range_flush(pr, writeable);
|
||||
page_range_add_worker(pr, _start, len);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void page_range_flush(Page_Range *pr, int writeable)
|
||||
{
|
||||
Range *work;
|
||||
|
||||
page_range_compact(pr);
|
||||
|
||||
for (work = pr->range_start; work; work = work->next) {
|
||||
os_protect_pages((void *)work->start, work->len, writeable);
|
||||
GC_MP_CNT_INC(mp_pr_call_cnt);
|
||||
}
|
||||
|
||||
page_range_reset(pr);
|
||||
}
|
||||
|
||||
#define Tree Range
|
||||
#define Splay_Item(t) (t)->start
|
||||
#define Set_Splay_Item(t, v) (t)->start = (v)
|
||||
|
@@ -20,16 +74,7 @@
|
|||
#undef Splay_Item
|
||||
#undef Set_Splay_Item
|
||||
|
||||
static void initialize_page_ranges(Page_Range *pr, void *block, unsigned long size)
|
||||
{
|
||||
pr->range_root = NULL;
|
||||
pr->range_start = NULL;
|
||||
pr->range_alloc_block = block;
|
||||
pr->range_alloc_size = size;
|
||||
pr->range_alloc_used = 0;
|
||||
}
|
||||
|
||||
static void compact_page_ranges(Page_Range *pr)
|
||||
static void page_range_compact(Page_Range *pr)
|
||||
{
|
||||
Range *work, *next;
|
||||
unsigned long start, len;
|
||||
|
@@ -52,7 +97,7 @@ static void compact_page_ranges(Page_Range *pr)
|
|||
}
|
||||
}
|
||||
|
||||
static void reset_page_ranges(Page_Range *pr)
|
||||
static void page_range_reset(Page_Range *pr)
|
||||
{
|
||||
pr->range_alloc_used = 0;
|
||||
pr->range_root = NULL;
|
||||
|
@@ -77,14 +122,11 @@ static int try_extend(Range *r, unsigned long start, unsigned long len)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int add_page_range(Page_Range *pr, void *_start, unsigned long len, unsigned long alignment)
|
||||
static int page_range_add_worker(Page_Range *pr, void *_start, unsigned long len)
|
||||
{
|
||||
unsigned long start = (unsigned long)_start;
|
||||
Range *r, *range_root = pr->range_root;
|
||||
|
||||
len += (alignment - 1);
|
||||
len -= (len & (alignment - 1));
|
||||
|
||||
range_root = range_splay(start, range_root);
|
||||
|
||||
if (range_root) {
|
||||
|
@@ -130,4 +172,4 @@ static int add_page_range(Page_Range *pr, void *_start, unsigned long len, unsig
|
|||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -1,45 +0,0 @@
|
|||
/*
|
||||
Provides:
|
||||
Requires:
|
||||
[page_range.c exports]
|
||||
[page allocator]
|
||||
*/
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
/* VirtualProtect can be used only on pages allocated at the same
|
||||
time, so we can't collapse ranges. */
|
||||
|
||||
# define initialize_protect_page_ranges(pr, b, s) /* */
|
||||
# define add_protect_page_range(pr, s, l, a, w) vm_protect_pages(s, l, w)
|
||||
# define flush_protect_page_ranges(pr, w) /* */
|
||||
|
||||
#else
|
||||
|
||||
static void initialize_protect_page_ranges(Page_Range *protect_range, void *block, unsigned long size)
|
||||
{
|
||||
initialize_page_ranges(protect_range, block, size);
|
||||
}
|
||||
|
||||
static void flush_protect_page_ranges(Page_Range *protect_range, int writeable)
|
||||
{
|
||||
Range *work;
|
||||
|
||||
compact_page_ranges(protect_range);
|
||||
|
||||
for (work = protect_range->range_start; work; work = work->next) {
|
||||
vm_protect_pages((void *)work->start, work->len, writeable);
|
||||
}
|
||||
|
||||
reset_page_ranges(protect_range);
|
||||
}
|
||||
|
||||
static void add_protect_page_range(Page_Range *protect_range, void *_start, unsigned long len, unsigned long alignment, int writeable)
|
||||
{
|
||||
if (!add_page_range(protect_range, _start, len, alignment)) {
|
||||
flush_protect_page_ranges(protect_range, writeable);
|
||||
add_page_range(protect_range, _start, len, alignment);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
|
@@ -1,66 +1,236 @@
|
|||
|
||||
/******************************************************************************/
|
||||
/* OS-specific low-level allocator */
|
||||
/******************************************************************************/
|
||||
|
||||
/* TODO
|
||||
OSKIT and WINDOWS hard code os_pagesize to APAGE_SIZE
|
||||
*/
|
||||
|
||||
#ifndef GCPRINT
|
||||
# define GCPRINT fprintf
|
||||
# define GCOUTF stderr
|
||||
#endif
|
||||
|
||||
static inline size_t vm_round_up_to_page_size(size_t len, size_t page_size) {
|
||||
len += (page_size -1) - (len & (page_size - 1));
|
||||
return len;
|
||||
enum {
|
||||
MMU_WRITE_PROTECTED = 0,
|
||||
MMU_WRITABLE = 1,
|
||||
};
|
||||
|
||||
#if !( defined(_WIN32) || defined(OSKIT) )
|
||||
typedef struct {
|
||||
char *start;
|
||||
long len;
|
||||
short age;
|
||||
short zeroed;
|
||||
} FreeBlock;
|
||||
#ifdef MZ_USE_PLACES
|
||||
#define USE_BLOCK_CACHE
|
||||
#endif
|
||||
|
||||
typedef struct VM {
|
||||
#if !( defined(_WIN32) || defined(OSKIT) )
|
||||
FreeBlock *freeblocks;
|
||||
struct BlockCache;
|
||||
typedef struct MMU {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
struct BlockCache *block_cache;
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
AllocCacheBlock *alloc_caches[2];
|
||||
Page_Range *page_range;
|
||||
#endif
|
||||
size_t memory_allocated;
|
||||
} VM;
|
||||
long memory_allocated;
|
||||
size_t os_pagesize;
|
||||
NewGC *gc;
|
||||
} MMU;
|
||||
|
||||
static VM *vm_create() {
|
||||
VM *vm = ofm_malloc(sizeof(VM));
|
||||
memset(vm, 0, sizeof(VM));
|
||||
#if !( defined(_WIN32) || defined(OSKIT) )
|
||||
#define BLOCKFREE_CACHE_SIZE 96
|
||||
vm->freeblocks = ofm_malloc(sizeof(FreeBlock) * BLOCKFREE_CACHE_SIZE);
|
||||
memset(vm->freeblocks, 0, sizeof(FreeBlock) * BLOCKFREE_CACHE_SIZE);
|
||||
static void *os_alloc_pages(size_t len);
|
||||
static void os_free_pages(void *p, size_t len);
|
||||
static void os_protect_pages(void *p, size_t len, int writable);
|
||||
#else
|
||||
static void *os_alloc_pages(MMU *mmu, size_t len, size_t alignment, int dirty);
|
||||
static void os_free_pages(MMU *mmu, void *p, size_t len);
|
||||
static void os_protect_pages(void *p, size_t len, int writable);
|
||||
static void os_flush_freed_pages(MMU *mmu);
|
||||
#endif
|
||||
return vm;
|
||||
|
||||
/* provides */
|
||||
static inline size_t mmu_get_os_page_size(MMU *mmu) { return mmu->os_pagesize; }
|
||||
static size_t mmu_memory_allocated(MMU *mmu);
|
||||
|
||||
static inline size_t align_up(const size_t len, const size_t boundary) {
|
||||
const size_t modulo = (len & (boundary - 1));
|
||||
if (modulo)
|
||||
return len + (boundary - modulo);
|
||||
return len;
|
||||
}
|
||||
static inline void* align_up_ptr(const void *p, const size_t boundary) {
|
||||
return (void*) align_up((size_t) p, boundary);
|
||||
}
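A quick worked example of the rounding helper, assuming a 4 KB OS page:

/* align_up(5000, 4096) == 8192   (rounded up to the next multiple)
   align_up(8192, 4096) == 8192   (already aligned, unchanged)
   align_up_ptr(p, APAGE_SIZE)    (the same computation on an address) */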
|
||||
|
||||
static void vm_free(VM *vm) {
|
||||
static inline size_t align_up_to_gc_pagesize(size_t len) {
|
||||
const size_t page_size = APAGE_SIZE;
|
||||
return align_up(len, page_size);
|
||||
}
|
||||
|
||||
static inline size_t mmu_round_up_to_os_page_size(MMU *mmu, size_t len) {
|
||||
const size_t page_size = mmu->os_pagesize;
|
||||
return align_up(len, page_size);
|
||||
}
|
||||
|
||||
static inline void mmu_assert_os_page_aligned(MMU *mmu, size_t p) {
|
||||
if (p & (mmu->os_pagesize - 1)) {
|
||||
printf("address or size is not OS PAGE ALIGNED!!!!");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
#include "block_cache.c"
|
||||
#include "alloc_cache.c"
|
||||
#include "page_range.c"
|
||||
#include <unistd.h>
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
#include "alloc_cache.c"
|
||||
#include "page_range.c"
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
|
||||
static MMU *mmu_create(NewGC *gc) {
|
||||
MMU *mmu = ofm_malloc_zero(sizeof(MMU));
|
||||
mmu->gc = gc;
|
||||
|
||||
#if !( defined(_WIN32) || defined(OSKIT) )
|
||||
free(vm->freeblocks);
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
mmu->block_cache = block_cache_create(mmu);
|
||||
#else
|
||||
/* initialization of page_range */
|
||||
mmu->page_range = page_range_create();
|
||||
|
||||
/* initialization of alloc_cache */
|
||||
mmu->alloc_caches[0] = alloc_cache_create();
|
||||
mmu->alloc_caches[1] = alloc_cache_create();
|
||||
#endif
|
||||
free(vm);
|
||||
|
||||
mmu->os_pagesize = getpagesize();
|
||||
#else
|
||||
mmu->os_pagesize = APAGE_SIZE;
|
||||
#endif
|
||||
|
||||
return mmu;
|
||||
}
|
||||
|
||||
static size_t vm_memory_allocated(VM *vm) {
|
||||
return vm->memory_allocated;
|
||||
static void mmu_free(MMU *mmu) {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
block_cache_free(mmu->block_cache);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
free(mmu->alloc_caches[0]);
|
||||
free(mmu->alloc_caches[1]);
|
||||
#endif
|
||||
free(mmu);
|
||||
}
|
||||
|
||||
static size_t vm_memory_allocated_inc(VM *vm, size_t len) {
|
||||
vm->memory_allocated += len;
|
||||
return vm->memory_allocated;
|
||||
static void *mmu_alloc_page(MMU* mmu, size_t len, size_t alignment, int dirty, int type, int expect_mprotect, void **src_block) {
|
||||
mmu_assert_os_page_aligned(mmu, len);
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
return block_cache_alloc_page(mmu->block_cache, len, alignment, dirty, type, expect_mprotect, src_block, &mmu->memory_allocated);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
//len = mmu_round_up_to_os_page_size(mmu, len);
|
||||
{
|
||||
AllocCacheBlock *alloc_cache = mmu->alloc_caches[!!expect_mprotect];
|
||||
return alloc_cache_alloc_page(alloc_cache, len, alignment, dirty, &mmu->memory_allocated);
|
||||
}
|
||||
#else
|
||||
return os_alloc_pages(mmu, len, alignment, dirty);
|
||||
#endif
|
||||
}
|
||||
|
||||
static size_t vm_memory_allocated_dec(VM *vm, size_t len) {
|
||||
vm->memory_allocated -= len;
|
||||
return vm->memory_allocated;
|
||||
static void mmu_free_page(MMU* mmu, void *p, size_t len, int type, int expect_mprotect, void **src_block) {
|
||||
mmu_assert_os_page_aligned(mmu, (size_t)p);
|
||||
mmu_assert_os_page_aligned(mmu, len);
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
mmu->memory_allocated += block_cache_free_page(mmu->block_cache, p, len, type, expect_mprotect, src_block);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
//len = mmu_round_up_to_os_page_size(mmu, len);
|
||||
{
|
||||
AllocCacheBlock *alloc_cache = mmu->alloc_caches[!!expect_mprotect];
|
||||
mmu->memory_allocated += alloc_cache_free_page(alloc_cache, p, len, MMU_DIRTY);
|
||||
}
|
||||
#else
|
||||
os_free_pages(mmu, p, len);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void mmu_flush_freed_pages(MMU *mmu) {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
mmu->memory_allocated += block_cache_flush_freed_pages(mmu->block_cache);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[0]);
|
||||
mmu->memory_allocated += alloc_cache_flush_freed_pages(mmu->alloc_caches[1]);
|
||||
#elif defined(_WIN32)
|
||||
os_flush_freed_pages(mmu);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void mmu_prep_for_compaction(MMU *mmu) {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
block_cache_prep_for_compaction(mmu->block_cache);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int mmu_should_compact_page(MMU *mmu, void **src_block) {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
return block_cache_compact(src_block);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mmu_write_unprotect_page(MMU *mmu, void *p, size_t len) {
|
||||
mmu_assert_os_page_aligned(mmu, (size_t)p);
|
||||
mmu_assert_os_page_aligned(mmu, len);
|
||||
os_protect_pages(p, len, 1);
|
||||
}
|
||||
|
||||
static void mmu_queue_protect_range(MMU *mmu, void *p, size_t len, int type, int writeable, void **src_block) {
|
||||
mmu_assert_os_page_aligned(mmu, (size_t)p);
|
||||
mmu_assert_os_page_aligned(mmu, len);
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
block_cache_queue_protect_range(mmu->block_cache, p, len, type, writeable, src_block);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
page_range_add(mmu->page_range, p, len, writeable);
|
||||
#else
|
||||
os_protect_pages(p, len, writeable);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void mmu_queue_write_protect_range(MMU *mmu, void *p, size_t len, int type, void **src_block) {
|
||||
mmu_queue_protect_range(mmu, p, len, type, MMU_WRITE_PROTECTED, src_block);
|
||||
}
|
||||
|
||||
static void mmu_queue_write_unprotect_range(MMU *mmu, void *p, size_t len, int type, void **src_block) {
|
||||
mmu_queue_protect_range(mmu, p, len, type, MMU_WRITABLE, src_block);
|
||||
}
|
||||
|
||||
static void mmu_flush_write_protect_ranges(MMU *mmu) {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
block_cache_flush_protect_ranges(mmu->block_cache, MMU_WRITE_PROTECTED);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
page_range_flush(mmu->page_range, MMU_WRITE_PROTECTED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void mmu_flush_write_unprotect_ranges(MMU *mmu) {
|
||||
#ifdef USE_BLOCK_CACHE
|
||||
block_cache_flush_protect_ranges(mmu->block_cache, MMU_WRITABLE);
|
||||
#elif !( defined(_WIN32) || defined(OSKIT) )
|
||||
page_range_flush(mmu->page_range, MMU_WRITABLE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static size_t mmu_memory_allocated(MMU *mmu) {
|
||||
return mmu->memory_allocated;
|
||||
}
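Taken together, the bookkeeping side of the interface is used roughly as follows at the end of a collection (hedged sketch; the real call sites are in the collector proper, not shown here):

static size_t example_end_of_collection(MMU *mmu)   /* illustrative only */
{
  /* return fully free blocks and aged cache entries to the OS */
  mmu_flush_freed_pages(mmu);

  /* the collector reads its heap footprint from the MMU layer */
  return mmu_memory_allocated(mmu);
}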
|
||||
|
||||
#if ( defined(_WIN32) || defined(OSKIT) )
|
||||
page_range_flush(mmu->page_range, MMU_WRITABLE);
|
||||
static void mmu_memory_allocated_inc(MMU *mmu, long amt) {
|
||||
mmu->memory_allocated += amt;
|
||||
}
|
||||
static void mmu_memory_allocated_dec(MMU *mmu, long amt) {
|
||||
mmu->memory_allocated -= amt;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if _WIN32 /* Windows */
|
||||
# include "vm_win.c"
|
||||
|
@@ -68,6 +238,6 @@ static size_t vm_memory_allocated_dec(VM *vm, size_t len) {
|
|||
# include "vm_osk.c"
|
||||
#elif defined(OS_X) /* OS X */
|
||||
# include "vm_osx.c"
|
||||
#else /* Default: mmap */
|
||||
#else /* Default: mmap, linux, unix, freebsd*/
|
||||
# include "vm_mmap.c"
|
||||
#endif
|
||||
|
|
|
@@ -14,7 +14,7 @@
|
|||
|
||||
static int page_size; /* OS page size */
|
||||
|
||||
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
||||
static void *os_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
||||
{
|
||||
void *r;
|
||||
|
||||
|
@@ -38,17 +38,13 @@ static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
|||
return r;
|
||||
}
|
||||
|
||||
static void vm_free_pages(VM *vm, void *p, size_t len)
|
||||
static void os_free_pages(VM *vm, void *p, size_t len)
|
||||
{
|
||||
vm_memory_allocated_dec(vm, len);
|
||||
free(p);
|
||||
}
|
||||
|
||||
static void vm_flush_freed_pages(VM *vm)
|
||||
{
|
||||
}
|
||||
|
||||
static void vm_protect_pages(void *p, size_t len, int writeable)
|
||||
static void os_protect_pages(void *p, size_t len, int writeable)
|
||||
{
|
||||
if (len & (page_size - 1)) {
|
||||
len += page_size - (len & (page_size - 1));
|
||||
|
|
|
@@ -2,7 +2,7 @@
|
|||
/*
|
||||
static void os_vm_free_pages(void *p, size_t len);
|
||||
static void *os_vm_alloc_pages(size_t len);
|
||||
static void vm_protect_pages(void *p, size_t len, int writeable);
|
||||
static void os_protect_pages(void *p, size_t len, int writeable);
|
||||
*/
|
||||
/* Requires: */
|
||||
/* Optional:
|
||||
|
@@ -15,16 +15,14 @@ static void vm_protect_pages(void *p, size_t len, int writeable);
|
|||
#include <sys/mman.h>
|
||||
#include <errno.h>
|
||||
|
||||
static long page_size;
|
||||
|
||||
static void os_vm_free_pages(void *p, size_t len)
|
||||
static void os_free_pages(void *p, size_t len)
|
||||
{
|
||||
if (munmap(p, len)) {
|
||||
GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)p, (long)len, errno);
|
||||
}
|
||||
}
|
||||
|
||||
static void *os_vm_alloc_pages(size_t len)
|
||||
static void *os_alloc_pages(size_t len)
|
||||
{
|
||||
void *r;
|
||||
|
||||
|
@@ -38,10 +36,6 @@ static void *os_vm_alloc_pages(size_t len)
|
|||
}
|
||||
#endif
|
||||
|
||||
/* Round up to nearest page: */
|
||||
if (len & (page_size - 1))
|
||||
len += page_size - (len & (page_size - 1));
|
||||
|
||||
#ifdef MAP_ANON
|
||||
r = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
|
||||
#else
|
||||
|
@@ -55,14 +49,9 @@ static void *os_vm_alloc_pages(size_t len)
|
|||
}
|
||||
|
||||
|
||||
static void vm_protect_pages(void *p, size_t len, int writeable)
|
||||
static void os_protect_pages(void *p, size_t len, int writeable)
|
||||
{
|
||||
if (len & (page_size - 1)) {
|
||||
len += page_size - (len & (page_size - 1));
|
||||
}
|
||||
|
||||
mprotect(p, len, (writeable ? (PROT_READ | PROT_WRITE) : PROT_READ));
|
||||
}
|
||||
|
||||
#include "alloc_cache.c"
|
||||
#include "rlimit_heapsize.c"
|
||||
|
|
|
@@ -9,7 +9,7 @@
|
|||
|
||||
#include <oskit/c/malloc.h>
|
||||
|
||||
inline static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
||||
inline static void *os_malloc_pages(MMU *mmu, size_t len, size_t alignment, int dirty_ok)
|
||||
{
|
||||
void *p;
|
||||
|
||||
|
@@ -18,21 +18,17 @@ inline static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int di
|
|||
if (!dirty_ok)
|
||||
memset(p, 0, len);
|
||||
|
||||
vm_memory_allocated_inc(vm, len);
|
||||
mmu_memory_allocated_inc(mmu, len);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
static void vm_free_pages(VM *vm, void *p, size_t len)
|
||||
static void os_free_pages(MMU *mmu, void *p, size_t len)
|
||||
{
|
||||
vm_memory_allocated_dec(vm, len);
|
||||
mmu_memory_allocated_dec(mmu, len);
|
||||
sfree(p, len);
|
||||
}
|
||||
|
||||
static void vm_flush_freed_pages(VM *vm)
|
||||
{
|
||||
}
|
||||
|
||||
#ifndef DONT_NEED_MAX_HEAP_SIZE
|
||||
static unsigned long determine_max_heap_size(void)
|
||||
{
|
||||
|
|
|
@@ -147,7 +147,7 @@ static mach_port_t task_self = 0;
|
|||
static mach_port_t exc_port = 0;
|
||||
|
||||
/* the VM subsystem as defined by the GC files */
|
||||
static void *os_vm_alloc_pages(size_t len)
|
||||
static void *os_alloc_pages(size_t len)
|
||||
{
|
||||
kern_return_t retval;
|
||||
void *r;
|
||||
|
@@ -167,7 +167,7 @@ static void *os_vm_alloc_pages(size_t len)
|
|||
return r;
|
||||
}
|
||||
|
||||
static void os_vm_free_pages(void *p, size_t len)
|
||||
static void os_free_pages(void *p, size_t len)
|
||||
{
|
||||
kern_return_t retval;
|
||||
|
||||
|
@@ -178,7 +178,7 @@ static void os_vm_free_pages(void *p, size_t len)
|
|||
}
|
||||
}
|
||||
|
||||
static void vm_protect_pages(void *p, size_t len, int writeable)
|
||||
static void os_protect_pages(void *p, size_t len, int writeable)
|
||||
{
|
||||
kern_return_t retval;
|
||||
|
||||
|
@@ -195,8 +195,6 @@ static void vm_protect_pages(void *p, size_t len, int writeable)
|
|||
}
|
||||
}
|
||||
|
||||
#include "alloc_cache.c"
|
||||
|
||||
#ifndef DONT_NEED_MAX_HEAP_SIZE
|
||||
|
||||
static unsigned long determine_max_heap_size()
|
||||
|
|
|
@@ -20,7 +20,7 @@ typedef struct {
|
|||
static alloc_cache_entry cache[2][CACHE_SLOTS];
|
||||
#endif
|
||||
|
||||
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
||||
static void *os_alloc_pages(MMU *mmu, size_t len, size_t alignment, int dirty_ok)
|
||||
{
|
||||
#if CACHE_SLOTS
|
||||
{
|
||||
|
@@ -42,7 +42,7 @@ static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
|||
}
|
||||
#endif
|
||||
|
||||
vm_memory_allocated_inc(vm, len);
|
||||
mmu_memory_allocated_inc(mmu, len);
|
||||
|
||||
/* VirtualAlloc MEM_COMMIT always zeros memory */
|
||||
return (void *)VirtualAlloc(NULL, len,
|
||||
|
@@ -50,7 +50,7 @@ static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
|
|||
PAGE_READWRITE);
|
||||
}
|
||||
|
||||
static void vm_free_pages(VM *vm, void *p, size_t len)
|
||||
static void os_free_pages(MMU *mmu, void *p, size_t len)
|
||||
{
|
||||
|
||||
#if CACHE_SLOTS
|
||||
|
@@ -69,11 +69,11 @@ static void vm_free_pages(VM *vm, void *p, size_t len)
|
|||
}
|
||||
#endif
|
||||
|
||||
vm_memory_allocated_dec(vm, len);
|
||||
mmu_memory_allocated_dec(mmu, len);
|
||||
VirtualFree(p, 0, MEM_RELEASE);
|
||||
}
|
||||
|
||||
static void vm_flush_freed_pages(VM *vm)
|
||||
static void os_flush_freed_pages(MMU *mmu)
|
||||
{
|
||||
#if CACHE_SLOTS
|
||||
int i;
|
||||
|
@@ -83,7 +83,7 @@ static void vm_flush_freed_pages(VM *vm)
|
|||
if (cache[1][i].len) {
|
||||
for (p = cache[1][i].page; p; p = next) {
|
||||
next = *(void **)p;
|
||||
vm_memory_allocated_dec(vm, cache[i].len);
|
||||
mmu_memory_allocated_dec(mmu, cache[i].len);
|
||||
VirtualFree(p, 0, MEM_RELEASE);
|
||||
}
|
||||
}
|
||||
|
@@ -95,7 +95,7 @@ static void vm_flush_freed_pages(VM *vm)
|
|||
#endif
|
||||
}
|
||||
|
||||
static void vm_protect_pages(void *p, size_t len, int writeable)
|
||||
static void os_protect_pages(void *p, size_t len, int writeable)
|
||||
{
|
||||
DWORD old;
|
||||
VirtualProtect(p, len, (writeable ? PAGE_READWRITE : PAGE_READONLY), &old);
|
||||
|
|
|
@@ -515,7 +515,7 @@ Scheme_Object *scheme_places_deep_copy_worker(Scheme_Object *so, Scheme_Hash_Tab
|
|||
car = scheme_places_deep_copy_worker(SCHEME_CAR(so), ht);
|
||||
cdr = scheme_places_deep_copy_worker(SCHEME_CDR(so), ht);
|
||||
pair = scheme_make_pair(car, cdr);
|
||||
return pair;
|
||||
new_so = pair;
|
||||
}
|
||||
break;
|
||||
case scheme_vector_type:
|
||||
|
|