Reorg of gc2/vm_* and alloc_cache

svn: r12255
Kevin Tew 2008-11-05 21:04:54 +00:00
parent aac8d83084
commit 3b3197ecc6
4 changed files with 150 additions and 190 deletions

@@ -13,10 +13,36 @@
ACTUALLY_FREEING_PAGES(len)
*/
/* interface to GC */
static void *malloc_pages(size_t len, size_t alignment);
static void *malloc_dirty_pages(size_t len, size_t alignment);
static void free_pages(void *p, size_t len);
static void flush_freed_pages(void);
static void protect_pages(void *p, size_t len, int writable);
/* interface to OS */
static void os_vm_free_pages(void *p, size_t len);
static void *os_vm_alloc_pages(size_t len);
/* private utility functions */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok);
static void *malloc_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 0);
}
static void *malloc_dirty_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 1);
}
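A usage sketch of the split these two thin wrappers encode (hypothetical call sites; the only behavioral difference is whether find_cached_pages below may skip the memset when it recycles a cached block):

  size_t len = 16 * 4096, align = 16 * 4096;       /* hypothetical request sizes */
  void *tbl = malloc_pages(len, align);            /* always returns zero-filled pages */
  void *scratch = malloc_dirty_pages(len, align);  /* may return stale contents when
                                                      served from the block cache */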
typedef struct {
void *start;
long len;
short age, zeroed;
short age;
short zeroed;
} Free_Block;
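For orientation, these entries make up a small fixed-size cache array (BLOCKFREE_CACHE_SIZE slots, which the loops below iterate over). The declaration itself sits outside the hunk, so the following is an assumed sketch of it together with how the code in this file uses each field:

  static Free_Block blockfree[BLOCKFREE_CACHE_SIZE];  /* assumed; not shown in this hunk */
  /* start, len: one contiguous run of freed, page-aligned memory
     age:        how many flush_freed_pages() passes the run has survived
     zeroed:     nonzero while the run is known to be all zero bytes, letting a
                 non-dirty allocation skip the memset in find_cached_pages */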
#define BLOCKFREE_UNMAP_AGE 1
@@ -44,7 +70,7 @@ static void collapse_adjacent_pages(void)
blockfree[i].start = NULL;
blockfree[i].len = 0;
if (!blockfree[i].zeroed)
blockfree[j].zeroed = 0;
} else
j = i;
}
@@ -60,12 +86,12 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok
if (blockfree[i].len == len) {
r = blockfree[i].start;
if (!alignment || !((unsigned long)r & (alignment - 1))) {
blockfree[i].start = NULL;
blockfree[i].len = 0;
if (!blockfree[i].zeroed && !dirty_ok)
memset(r, 0, len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
}
}
@@ -76,26 +102,26 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok
/* Align at start? */
r = blockfree[i].start;
if (!alignment || !((unsigned long)r & (alignment - 1))) {
blockfree[i].start += len;
blockfree[i].len -= len;
if (!blockfree[i].zeroed && !dirty_ok)
memset(r, 0, len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
/* Align at end? */
r = blockfree[i].start + (blockfree[i].len - len);
if (!((unsigned long)r & (alignment - 1))) {
blockfree[i].len -= len;
if (!blockfree[i].zeroed && !dirty_ok)
memset(r, 0, len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
/* We don't try a middle alignment, because that would
split the block into three. */
}
}
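For example, if only an interior address of a cached run satisfied the requested alignment, handing it out would leave a fragment in front and a fragment behind; a comment-level sketch of the outcome the code avoids:

  /* hypothetical middle alignment inside blockfree[i]:
       [start, aligned_r)                   front fragment, still cacheable
       [aligned_r, aligned_r + len)         would be returned to the caller
       [aligned_r + len, start + old_len)   back fragment
     but a Free_Block holds a single start/len pair, so one of the two
     fragments would need another cache slot or be lost track of */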
@@ -103,7 +129,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok
return NULL;
}
static void free_actual_pages(void *p, size_t len, int zeroed)
static void return_mem_to_cache(void *p, size_t len, int zeroed)
{
int i;
@@ -116,17 +142,17 @@ static void free_actual_pages(void *p, size_t len, int zeroed)
for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++)
if(blockfree[i].start && (blockfree[i].len < (1024 * 1024))) {
if (p == blockfree[i].start + blockfree[i].len) {
blockfree[i].len += len;
if (!zeroed)
blockfree[i].zeroed = 0;
return;
}
if (p + len == blockfree[i].start) {
blockfree[i].start = p;
blockfree[i].len += len;
if (!zeroed)
blockfree[i].zeroed = 0;
return;
}
}
@@ -143,7 +169,7 @@ static void free_actual_pages(void *p, size_t len, int zeroed)
/* Might help next time around: */
collapse_adjacent_pages();
system_free_pages(p, len);
os_vm_free_pages(p, len);
ACTUALLY_FREEING_PAGES(len);
}
@@ -151,7 +177,7 @@ static void free_actual_pages(void *p, size_t len, int zeroed)
static void free_pages(void *p, size_t len)
{
LOGICALLY_FREEING_PAGES(len);
free_actual_pages(p, len, 0);
return_mem_to_cache(p, len, 0);
}
static void flush_freed_pages(void)
@@ -163,12 +189,76 @@ static void flush_freed_pages(void)
for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
if (blockfree[i].start) {
if (blockfree[i].age == BLOCKFREE_UNMAP_AGE) {
system_free_pages(blockfree[i].start, blockfree[i].len);
ACTUALLY_FREEING_PAGES(blockfree[i].len);
blockfree[i].start = NULL;
blockfree[i].len = 0;
os_vm_free_pages(blockfree[i].start, blockfree[i].len);
ACTUALLY_FREEING_PAGES(blockfree[i].len);
blockfree[i].start = NULL;
blockfree[i].len = 0;
} else
blockfree[i].age++;
}
}
}
/* Instead of immediately freeing pages with munmap---only to mmap
them again---we cache BLOCKFREE_CACHE_SIZE freed pages. A page is
cached unused for at most BLOCKFREE_UNMAP_AGE cycles of the
collector. (A max age of 1 seems useful, anything more seems
dangerous.)
The cache is small enough that we don't need an elaborate search
mechanism, but we do a bit of work to collapse adjacent pages in
the cache. */
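A concrete reading of this policy, assuming a freshly cached run starts at age 0 (the part of return_mem_to_cache that installs a new entry is outside these hunks):

  /* collection N:      free_pages() -> run cached with age 0, still reusable
     flush after N:     age 0 != BLOCKFREE_UNMAP_AGE, so age becomes 1
     flush after N + 1: age == BLOCKFREE_UNMAP_AGE, so os_vm_free_pages()
                        returns the run to the OS and the slot is cleared */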
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
void *r;
if (!page_size)
page_size = getpagesize();
CHECK_USED_AGAINST_MAX(len);
/* Round up to nearest page: */
if (len & (page_size - 1))
len += page_size - (len & (page_size - 1));
/* Something from the cache, perhaps? */
r = find_cached_pages(len, alignment, dirty_ok);
if (r)
return r;
r = os_vm_alloc_pages(len + alignment);
if (r == (void *)-1)
return NULL;
if (alignment) {
/* We allocated too large so we can choose the alignment. */
size_t extra;
void *real_r;
long pre_extra;
extra = alignment;
real_r = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
pre_extra = real_r - r;
/* free the unused slack in front of the aligned block */
if (pre_extra) { os_vm_free_pages(r, pre_extra); }
/* slack also remains in back */
if (pre_extra < extra) {
if (pre_extra == 0) {
/* Instead of actually unmapping, put it in the cache, and there's
a good chance we can use it next time: */
ACTUALLY_ALLOCATING_PAGES(extra);
return_mem_to_cache(real_r + len, extra, 1);
}
else { os_vm_free_pages(real_r + len, extra - pre_extra); }
}
r = real_r;
}
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
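The alignment handling above over-allocates by the full alignment and then trims. A small self-contained sketch of the pointer arithmetic with made-up numbers (a 256 KB request at 1 MB alignment, and an invented return address; the real code naturally works with whatever mapping os_vm_alloc_pages produced):

  #include <stdio.h>

  int main(void)
  {
    unsigned long len = 256 * 1024, alignment = 1024 * 1024;  /* hypothetical request */
    unsigned long r = 0x43218000UL;   /* pretend os_vm_alloc_pages(len + alignment) gave us this */
    unsigned long real_r = (r + (alignment - 1)) & ~(alignment - 1);
    unsigned long pre_extra = real_r - r;        /* 0xe8000: slack in front, freed at once */
    unsigned long post = alignment - pre_extra;  /* 0x18000: slack in back, also freed;
                                                    when pre_extra is 0, the whole alignment
                                                    is instead cached for reuse */
    printf("r=%#lx real_r=%#lx pre_extra=%#lx post=%#lx\n", r, real_r, pre_extra, post);
    return 0;
  }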

@@ -1,14 +1,9 @@
/*
Provides:
mmap-based allocator (uses alloc_cache.c)
determine_max_heap_size()
Requires:
my_qsort (for alloc_cache.c)
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
/* Provides: */
static void os_vm_free_pages(void *p, size_t len);
static void *os_vm_alloc_pages(size_t len);
static void protect_pages(void *p, size_t len, int writeable);
/* Requires: */
/* Optional:
CHECK_USED_AGAINST_MAX(len)
GCPRINT
GCOUTF
@@ -29,32 +24,22 @@
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
static int page_size; /* OS page size */
static long page_size;
#ifndef MAP_ANON
static int fd, fd_created;
static int fd;
static int fd_created;
#endif
inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);
static void free_actual_pages(void *p, size_t len, int zeroed);
static void os_vm_free_pages(void *p, size_t len)
{
if (munmap(p, len)) {
GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)p, (long)len, errno);
}
}
/* Instead of immediately freeing pages with munmap---only to mmap
them again---we cache BLOCKFREE_CACHE_SIZE freed pages. A page is
cached unused for at most BLOCKFREE_UNMAP_AGE cycles of the
collector. (A max age of 1 seems useful, anything more seems
dangerous.)
The cache is small enough that we don't need an elaborate search
mechanism, but we do a bit of work to collapse adjacent pages in
the cache. */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
static void *os_vm_alloc_pages(size_t len)
{
void *r;
size_t extra = 0;
if (!page_size)
page_size = getpagesize();
#ifndef MAP_ANON
if (!fd_created) {
@@ -63,75 +48,22 @@ static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
}
#endif
CHECK_USED_AGAINST_MAX(len);
/* Round up to nearest page: */
if (len & (page_size - 1))
len += page_size - (len & (page_size - 1));
/* Something from the cache, perhaps? */
r = find_cached_pages(len, alignment, dirty_ok);
if (r)
return r;
extra = alignment;
#ifdef MAP_ANON
r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
r = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
r = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
if (r == (void *)-1)
return NULL;
if (extra) {
/* We allocated too large so we can choose the alignment. */
void *real_r;
long pre_extra;
real_r = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
pre_extra = real_r - r;
if (pre_extra)
if (munmap(r, pre_extra))
GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)r, pre_extra, errno);
if (pre_extra < extra) {
if (!pre_extra) {
/* Instead of actually unmapping, put it in the cache, and there's
a good chance we can use it next time: */
ACTUALLY_ALLOCATING_PAGES(extra);
free_actual_pages(real_r + len, extra, 1);
} else {
if (munmap(real_r + len, extra - pre_extra))
GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)r, pre_extra, errno);
}
}
r = real_r;
}
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
static void *malloc_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 0);
}
static void *malloc_dirty_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 1);
}
static void system_free_pages(void *p, size_t len)
{
if (munmap(p, len)) {
GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)p, (long)len, errno);
}
}
static void protect_pages(void *p, size_t len, int writeable)
{
@@ -142,8 +74,5 @@ static void protect_pages(void *p, size_t len, int writeable)
mprotect(p, len, (writeable ? (PROT_READ | PROT_WRITE) : PROT_READ));
}
# include "alloc_cache.c"
/*************************************************************/
# include "rlimit_heapsize.c"
#include "alloc_cache.c"
#include "rlimit_heapsize.c"

@@ -48,7 +48,6 @@ inline static void *malloc_pages(size_t len, size_t alignment)
static void free_pages(void *p, size_t len)
{
free_used_pages(len);
sfree(p, len);
LOGICALLY_FREEING_PAGES(len);

@@ -9,10 +9,6 @@
GENERATIONS --- zero or non-zero
designate_modified --- when GENERATIONS is non-zero
my_qsort (for alloc_cache.c)
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
CHECK_USED_AGAINST_MAX(len)
GCPRINT
@@ -93,81 +89,27 @@ static mach_port_t task_self = 0;
static mach_port_t exc_port = 0;
/* the VM subsystem as defined by the GC files */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
static void *os_vm_alloc_pages(size_t len)
{
kern_return_t retval;
size_t extra = 0;
void *r;
if(!task_self) task_self = mach_task_self();
CHECK_USED_AGAINST_MAX(len);
/* round up to the nearest page: */
if(len & (page_size - 1))
len += page_size - (len & (page_size - 1));
r = find_cached_pages(len, alignment, dirty_ok);
if (r)
return r;
extra = alignment;
retval = vm_allocate(task_self, (vm_address_t*)&r, len + extra, TRUE);
retval = vm_allocate(task_self, (vm_address_t*)&r, len, TRUE);
if(retval != KERN_SUCCESS) {
GCPRINT(GCOUTF, "Couldn't allocate memory: %s\n", mach_error_string(retval));
abort();
}
if(extra) {
/* we allocated too large so we can choose the alignment */
void *real_r;
long pre_extra;
real_r = (void*)(((unsigned long)r + (alignment-1)) & (~(alignment-1)));
pre_extra = real_r - r;
if(pre_extra) {
retval = vm_deallocate(task_self, (vm_address_t)r, pre_extra);
if(retval != KERN_SUCCESS) {
GCPRINT(GCOUTF, "WARNING: couldn't deallocate pre-extra: %s\n",
mach_error_string(retval));
}
}
if(pre_extra < extra) {
if (!pre_extra) {
/* Instead of actually unmapping, put it in the cache, and there's
a good chance we can use it next time: */
ACTUALLY_ALLOCATING_PAGES(extra);
free_actual_pages(real_r + len, extra, 1);
} else {
retval = vm_deallocate(task_self, (vm_address_t)real_r + len,
extra - pre_extra);
if(retval != KERN_SUCCESS) {
GCPRINT(GCOUTF, "WARNING: couldn't deallocate post-extra: %s\n",
mach_error_string(retval));
}
}
}
r = real_r;
}
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
static void *malloc_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 0);
}
static void *malloc_dirty_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 1);
}
static void system_free_pages(void *p, size_t len)
static void os_vm_free_pages(void *p, size_t len)
{
kern_return_t retval;
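For symmetry with the mmap sketch above, a condensed sketch of the Mach-based pair this file is reduced to. The os_vm_free_pages body here is an assumption modeled on the vm_deallocate calls removed above, with fprintf standing in for GCPRINT/GCOUTF:

  #include <stdio.h>
  #include <stdlib.h>
  #include <mach/mach.h>
  #include <mach/mach_error.h>

  static mach_port_t task_self = 0;

  static void *os_vm_alloc_pages(size_t len)
  {
    void *r;
    kern_return_t retval;
    if (!task_self) task_self = mach_task_self();
    retval = vm_allocate(task_self, (vm_address_t *)&r, len, TRUE);
    if (retval != KERN_SUCCESS) {
      fprintf(stderr, "Couldn't allocate memory: %s\n", mach_error_string(retval));
      abort();
    }
    return r;
  }

  static void os_vm_free_pages(void *p, size_t len)
  {
    kern_return_t retval = vm_deallocate(task_self, (vm_address_t)p, len);
    if (retval != KERN_SUCCESS)
      fprintf(stderr, "WARNING: couldn't deallocate page: %s\n",
              mach_error_string(retval));
  }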