More cleanup of the vm_* layer

svn: r12303
Author: Kevin Tew
Date: 2008-11-05 21:09:58 +00:00
parent 6fb0437222
commit 8220f558c3
10 changed files with 163 additions and 282 deletions

View File

@@ -1,15 +1,12 @@
/*
Provides:
find_cached_pages --- same interface as malloc_pages
free_pages --- usual interface
flush_freed_pages --- usual interface
vm_free_pages --- usual interface
vm_flush_freed_pages --- usual interface
Requires (defined earlier):
system_free_pages --- called with len already rounded up to page size
page_size --- in bytes
my_qsort --- possibly from my_qsort.c
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
*/
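These vm_* files are not compiled on their own; each is a textual template that a host file #includes after defining the hooks listed under "Requires" (the same pattern appears below, where the collectors include vm.c and it in turn includes one OS backend). A minimal sketch of such a host, with a made-up counter name:

/* Hypothetical host: define the hooks alloc_cache.c expects, then
   include it textually.  reserved_bytes is a made-up counter name. */
static long page_size;                /* OS page size, in bytes */
static unsigned long reserved_bytes;  /* bytes actually mapped from the OS */

#define ACTUALLY_ALLOCATING_PAGES(len) (reserved_bytes += (len))
#define ACTUALLY_FREEING_PAGES(len)    (reserved_bytes -= (len))

static void system_free_pages(void *p, size_t len); /* len page-aligned */

#include "my_qsort.c"     /* provides my_qsort */
#include "alloc_cache.c"  /* provides vm_free_pages, vm_flush_freed_pages, ... */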
@@ -27,21 +24,6 @@ static void protect_pages(void *p, size_t len, int writable);
static void os_vm_free_pages(void *p, size_t len);
static void *os_vm_alloc_pages(size_t len);
*/
/* private utility functions */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok);
static void *malloc_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 0);
}
static void *malloc_dirty_pages(size_t len, size_t alignment)
{
return do_malloc_pages(len, alignment, 1);
}
#define BLOCKFREE_UNMAP_AGE 1
static int compare_free_block(const void *a, const void *b)
@@ -87,7 +69,6 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok
blockfree[i].len = 0;
if (!blockfree[i].zeroed && !dirty_ok)
memset(r, 0, len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
}
@@ -103,7 +84,6 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok
blockfree[i].len -= len;
if (!blockfree[i].zeroed && !dirty_ok)
memset(r, 0, len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
@@ -113,7 +93,6 @@ inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok
blockfree[i].len -= len;
if (!blockfree[i].zeroed && !dirty_ok)
memset(r, 0, len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
@@ -172,13 +151,12 @@ static void return_mem_to_cache(void *p, size_t len, int zeroed)
ACTUALLY_FREEING_PAGES(len);
}
static void free_pages(void *p, size_t len)
static void vm_free_pages(void *p, size_t len)
{
LOGICALLY_FREEING_PAGES(len);
return_mem_to_cache(p, len, 0);
}
static void flush_freed_pages(void)
static void vm_flush_freed_pages(void)
{
int i;
Free_Block *blockfree = GC->blockfree;
@@ -208,56 +186,47 @@ static void flush_freed_pages(void)
mechanism, but we do a bit of work to collapse adjacent pages in
the cache. */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
void *r;
if (!page_size)
page_size = getpagesize();
CHECK_USED_AGAINST_MAX(len);
/* Round up to nearest page: */
if (len & (page_size - 1))
len += page_size - (len & (page_size - 1));
/* Something from the cache, perhaps? */
r = find_cached_pages(len, alignment, dirty_ok);
if (r)
return r;
if(!r) {
/* attempt to allocate from OS */
r = os_vm_alloc_pages(len + alignment);
if(r == (void *)-1) { return NULL; }
r = os_vm_alloc_pages(len + alignment);
if (alignment) {
/* We allocated too large so we can choose the alignment. */
size_t extra = alignment;
void *real_r = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
long pre_extra = real_r - r;
if (r == (void *)-1)
return NULL;
if (alignment) {
/* We allocated too large so we can choose the alignment. */
size_t extra;
void *real_r;
long pre_extra;
extra = alignment;
real_r = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
pre_extra = real_r - r;
/* in front extra */
if (pre_extra) { os_vm_free_pages(r, pre_extra); }
/* in back extra exists */
if (pre_extra < extra) {
if (pre_extra == 0) {
/* Instead of actually unmapping, put it in the cache, and there's
a good chance we can use it next time: */
ACTUALLY_ALLOCATING_PAGES(extra);
return_mem_to_cache(real_r + len, extra, 1);
}
else { os_vm_free_pages(real_r + len, extra - pre_extra); }
/* in front extra */
if (pre_extra) { os_vm_free_pages(r, pre_extra); }
/* in back extra exists */
if (pre_extra < extra) {
if (pre_extra == 0) {
/* Instead of actually unmapping, put it in the cache, and there's
a good chance we can use it next time: */
ACTUALLY_ALLOCATING_PAGES(extra);
return_mem_to_cache(real_r + len, extra, 1);
}
else { os_vm_free_pages(real_r + len, extra - pre_extra); }
}
r = real_r;
}
r = real_r;
}
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
ACTUALLY_ALLOCATING_PAGES(len);
}
return r;
}
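The alignment path above over-allocates by `alignment` bytes, rounds the base pointer up, and trims the slack: the front slack goes back to the OS, and when there is no front slack the whole tail is parked in the cache instead of being unmapped. A self-contained sketch of the same trick over POSIX mmap (function name hypothetical; the cache step is simplified to a plain munmap):

#include <stdint.h>
#include <sys/mman.h>

/* Allocate len bytes aligned to `alignment` (a nonzero power of two,
   a multiple of the page size) by over-allocating and trimming. */
static void *alloc_aligned(size_t len, size_t alignment)
{
  char *r = mmap(NULL, len + alignment, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); /* MAP_ANON on BSD */
  if (r == MAP_FAILED)
    return NULL;

  char *real_r = (char *)(((uintptr_t)r + (alignment - 1))
                          & ~(uintptr_t)(alignment - 1));
  size_t pre = (size_t)(real_r - r);

  if (pre)                                 /* slack in front */
    munmap(r, pre);
  if (pre < alignment)                     /* slack in back */
    munmap(real_r + len, alignment - pre);
  return real_r;
}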

View File

@@ -353,9 +353,7 @@ static MSet *sets[NUM_SETS]; /* First one is tagged, last one is atomic */
/********************* Statistics *********************/
static long page_allocations = 0;
static long page_reservations = 0;
#define LOGICALLY_ALLOCATING_PAGES(len) (page_allocations += len)
#define ACTUALLY_ALLOCATING_PAGES(len) (page_reservations += len)
#define LOGICALLY_FREEING_PAGES(len) (page_allocations -= len)
#define ACTUALLY_FREEING_PAGES(len) (page_reservations -= len)
static long memory_in_use, gc_threshold = GROW_ADDITION, max_memory_use;
@@ -427,6 +425,19 @@ static int just_checking, the_size;
#define DONT_NEED_MAX_HEAP_SIZE
#include "vm.c"
static void *malloc_pages(size_t len, size_t alignment)
{
page_allocations += len;
return vm_malloc_pages(len, alignment, 0);
}
static void free_pages(void *p, size_t len)
{
page_allocations -= len;
vm_free_pages(p, len);
}
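With the macros gone, the split between the two counters is easy to read off the wrappers: page_allocations tracks what the collector has logically handed out, while page_reservations (updated by the ACTUALLY_* hooks inside the vm layer) tracks what is really mapped from the OS. They diverge because vm_free_pages parks pages in the cache rather than unmapping them. A sketch of the divergence (assuming the freed page is not recycled in between):

void counter_example(void)
{
  void *p = malloc_pages(4096, 0); /* logical += 4096; actual += 4096
                                      (unless served from the cache)   */
  free_pages(p, 4096);             /* logical -= 4096; actual unchanged:
                                      the page sits in the cache       */
  vm_flush_freed_pages();          /* actual -= 4096 once the cached
                                      block is finally unmapped        */
}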
/******************************************************************************/
/* client setup */
/******************************************************************************/
@@ -871,9 +882,9 @@ static void init_all_mpages(int young)
#if GENERATIONS
if (generations_available) {
if (page->flags & MFLAG_BIGBLOCK)
protect_pages((void *)p, page->u.size, 1);
vm_protect_pages((void *)p, page->u.size, 1);
else
protect_pages((void *)p, MPAGE_SIZE, 1);
vm_protect_pages((void *)p, MPAGE_SIZE, 1);
}
#endif
page->flags |= MFLAG_MODIFIED;
@@ -935,9 +946,9 @@ static void init_all_mpages(int young)
#if GENERATIONS
if (generations_available) {
if (page->flags & MFLAG_BIGBLOCK)
protect_pages((void *)p, page->u.size, 1);
vm_protect_pages((void *)p, page->u.size, 1);
else
protect_pages((void *)p, MPAGE_SIZE, 1);
vm_protect_pages((void *)p, MPAGE_SIZE, 1);
}
#endif
page->flags |= MFLAG_MODIFIED;
@@ -2587,7 +2598,7 @@ static void free_unused_mpages()
}
}
flush_freed_pages();
vm_flush_freed_pages();
}
void promote_all_ages()
@@ -2618,9 +2629,9 @@ void protect_old_mpages()
p = page->block_start;
if (page->flags & MFLAG_BIGBLOCK)
protect_pages((void *)p, page->u.size, 0);
vm_protect_pages((void *)p, page->u.size, 0);
else
protect_pages((void *)p, MPAGE_SIZE, 0);
vm_protect_pages((void *)p, MPAGE_SIZE, 0);
}
}
}
@@ -2665,9 +2676,9 @@ static int designate_modified_maybe(void *p, int no_barrier_ok)
page->flags |= MFLAG_MODIFIED;
p = (void *)((long)p & MPAGE_START);
if (page->flags & MFLAG_BIGBLOCK)
protect_pages(p, page->u.size, 1);
vm_protect_pages(p, page->u.size, 1);
else
protect_pages(p, MPAGE_SIZE, 1);
vm_protect_pages(p, MPAGE_SIZE, 1);
num_seg_faults++;
return 1;
}
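designate_modified_maybe is the other half of the write barrier that vm_protect_pages arms: old pages are made read-only, the first store faults, and the handler re-enables writing, flags the page as modified, and lets the store retry. A self-contained POSIX sketch of the mechanism (the real code uses Mach exceptions on OS X and SIGSEGV elsewhere; all names here are made up):

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define PAGE 4096
static char *watched;      /* the one page we protect */
static int modified;       /* set by the fault handler */

static void on_fault(int sig, siginfo_t *si, void *ctx)
{
  (void)sig; (void)ctx;
  char *p = (char *)((uintptr_t)si->si_addr & ~(uintptr_t)(PAGE - 1));
  if (p == watched) {
    modified = 1;                              /* record the write...  */
    mprotect(p, PAGE, PROT_READ | PROT_WRITE); /* ...then let it retry */
  } else {
    _exit(1);                                  /* a genuine crash      */
  }
}

int main(void)
{
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = on_fault;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);
  sigaction(SIGBUS, &sa, NULL);       /* some systems deliver SIGBUS */

  watched = mmap(NULL, PAGE, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  mprotect(watched, PAGE, PROT_READ); /* arm the barrier */
  watched[0] = 'y';                   /* faults once, then succeeds */
  printf("modified = %d\n", modified);
  return 0;
}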

View File

@@ -178,15 +178,7 @@ inline static void check_used_against_max(size_t len)
}
}
inline static void free_used_pages(size_t len)
{
GC->used_pages -= (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1);
}
#define CHECK_USED_AGAINST_MAX(len) check_used_against_max(len)
#define LOGICALLY_ALLOCATING_PAGES(len) /* empty */
#define ACTUALLY_ALLOCATING_PAGES(len) GC->actual_pages_size += len
#define LOGICALLY_FREEING_PAGES(len) free_used_pages(len)
#define ACTUALLY_FREEING_PAGES(len) GC->actual_pages_size -= len
#include "page_range.c"
@@ -195,6 +187,25 @@ inline static void free_used_pages(size_t len)
#include "protect_range.c"
static void *malloc_pages(size_t len, size_t alignment)
{
check_used_against_max(len);
return vm_malloc_pages(len, alignment, 0);
}
static void *malloc_dirty_pages(size_t len, size_t alignment)
{
check_used_against_max(len);
return vm_malloc_pages(len, alignment, 1);
}
static void free_pages(void *p, size_t len)
{
GC->used_pages -= (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1);
vm_free_pages(p, len);
}
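The used_pages adjustment above is a ceiling division spelled out with an explicit remainder test; the usual closed form is equivalent:

/* Number of APAGE_SIZE pages covering len bytes.  Equivalent to
   (len / APAGE_SIZE) + (((len % APAGE_SIZE) == 0) ? 0 : 1):
   pages_for(1) == 1, pages_for(APAGE_SIZE) == 1,
   pages_for(APAGE_SIZE + 1) == 2. */
static size_t pages_for(size_t len)
{
  return (len + APAGE_SIZE - 1) / APAGE_SIZE;
}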
/*****************************************************************************/
/* Memory Tracing, Part 1 */
/*****************************************************************************/
@@ -524,7 +535,7 @@ inline static void *allocate(size_t sizeb, int type)
/* WARNING: tries to avoid a collection but
* malloc_pages can cause a collection due to check_used_against_max */
else if (GC->dumping_avoid_collection) {
struct mpage *new_mpage= gen0_create_new_mpage();
mpage *new_mpage = gen0_create_new_mpage();
/* push page */
new_mpage->next = GC->gen0.curr_alloc_page;
@@ -1296,7 +1307,7 @@ int designate_modified(void *p)
if(page) {
if (!page->back_pointers) {
page->mprotected = 0;
protect_pages(page->addr, page->big_page ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
vm_protect_pages(page->addr, page->big_page ? round_to_apage_size(page->size) : APAGE_SIZE, 1);
page->back_pointers = 1;
return 1;
}
@@ -1506,7 +1517,7 @@ void GC_mark(const void *const_p)
work->marked_on = 1;
if (work->mprotected) {
work->mprotected = 0;
protect_pages(work->addr, APAGE_SIZE, 1);
vm_protect_pages(work->addr, APAGE_SIZE, 1);
}
newplace = PTR(NUM(work->addr) + work->size);
} else {
@@ -2004,7 +2015,7 @@ inline static void do_heap_compact(void)
if (npage->mprotected) {
npage->mprotected = 0;
protect_pages(npage->addr, APAGE_SIZE, 1);
vm_protect_pages(npage->addr, APAGE_SIZE, 1);
}
GCDEBUG((DEBUGOUTF,"Moving size %i object from %p to %p\n",
@@ -2400,7 +2411,7 @@ static void garbage_collect(int force_full)
protect_old_pages();
TIME_STEP("protect");
if (gc->gc_full)
flush_freed_pages();
vm_flush_freed_pages();
reset_finalizer_tree();
TIME_STEP("reset");
@@ -2412,7 +2423,7 @@ static void garbage_collect(int force_full)
/* If we have too many idle pages, flush: */
if (gc->actual_pages_size > ((gc->used_pages << (LOG_APAGE_SIZE + 1)))) {
flush_freed_pages();
vm_flush_freed_pages();
}
/* update some statistics */
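used_pages counts APAGE_SIZE units while actual_pages_size counts bytes, so `used_pages << (LOG_APAGE_SIZE + 1)` is twice the live footprint in bytes: the cache is flushed once OS-reserved memory exceeds double what the collector is actually using. As a predicate (the struct name NewGC is assumed from context):

/* Flush the vm-layer page cache when reserved memory exceeds twice the
   pages in active use.  used_pages: APAGE_SIZE units; actual_pages_size:
   bytes; APAGE_SIZE == 1 << LOG_APAGE_SIZE. */
static int too_many_idle_pages(NewGC *gc)
{
  return gc->actual_pages_size > (gc->used_pages << (LOG_APAGE_SIZE + 1));
}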
@@ -2542,10 +2553,10 @@ void GC_free_all(void)
next = work->next;
if (work->mprotected)
protect_pages(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
vm_protect_pages(work->addr, work->big_page ? round_to_apage_size(work->size) : APAGE_SIZE, 1);
gen1_free_mpage(pagemap, work);
}
}
flush_freed_pages();
vm_flush_freed_pages();
}

View File

@@ -11,7 +11,7 @@
time, so we can't collapse ranges. */
# define initialize_protect_page_ranges(pr, b, s) /* */
# define add_protect_page_range(pr, s, l, a, w) protect_pages(s, l, w)
# define add_protect_page_range(pr, s, l, a, w) vm_protect_pages(s, l, w)
# define flush_protect_page_ranges(pr, w) /* */
#else
@@ -28,7 +28,7 @@ static void flush_protect_page_ranges(Page_Range *protect_range, int writeable)
compact_page_ranges(protect_range);
for (work = protect_range->range_start; work; work = work->next) {
protect_pages((void *)work->start, work->len, writeable);
vm_protect_pages((void *)work->start, work->len, writeable);
}
reset_page_ranges(protect_range);
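Batching matters because every vm_protect_pages call is an mprotect syscall; accumulating ranges and collapsing physically adjacent ones first turns many small calls into a few large ones. A sketch of the coalescing pass (list types hypothetical, assuming the list is sorted by start address):

#include <stddef.h>
#include <stdint.h>

typedef struct Range {
  uintptr_t start;
  size_t len;
  struct Range *next;
} Range;

/* Merge adjacent ranges so each merged run costs one mprotect call. */
static void compact_ranges(Range *r)
{
  while (r && r->next) {
    if (r->start + r->len == r->next->start) {
      r->len += r->next->len;    /* absorb the neighbor         */
      r->next = r->next->next;   /* (node freeing omitted here) */
    } else {
      r = r->next;
    }
  }
}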

View File

@@ -3,39 +3,17 @@
/* OS-specific low-level allocator */
/******************************************************************************/
/******************************************************************************/
/* Windows */
#ifndef GCPRINT
# define GCPRINT fprintf
# define GCOUTF stderr
#endif
#if _WIN32
#if _WIN32 /* Windows */
# include "vm_win.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* OSKit */
#if OSKIT
#elif defined(OSKIT) /* OSKit */
# include "vm_osk.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* OS X */
#if defined(OS_X)
# if GENERATIONS
static int designate_modified(void *p);
# endif
# define TEST 0
#elif defined(OS_X) /* OS X */
# include "vm_osx.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* Default: mmap */
#ifndef MALLOCATOR_DEFINED
#else /* Default: mmap */
# include "vm_mmap.c"
#endif

View File

@ -3,12 +3,9 @@
posix_memalign-based allocator
determine_max_heap_size() (uses rlimit_heapsize.c)
Requires:
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
CHECK_USED_AGAINST_MAX(len)
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
@@ -17,21 +14,15 @@
#include <sys/mman.h>
#include <errno.h>
#ifndef CHECK_USED_AGAINST_MAX
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
static int page_size; /* OS page size */
static void *malloc_dirty_pages(size_t len, size_t alignment)
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
void *r;
if (!page_size)
page_size = getpagesize();
CHECK_USED_AGAINST_MAX(len);
/* Round up to nearest page: */
if (len & (page_size - 1))
len += page_size - (len & (page_size - 1));
@@ -43,31 +34,23 @@ static void *malloc_dirty_pages(size_t len, size_t alignment)
}
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
if(!dirty_ok)
memset(r, 0, len);
return r;
}
static void *malloc_pages(size_t len, size_t alignment)
{
void *p;
p = malloc_dirty_pages(len, alignment);
memset(p, 0, len);
return p;
}
static void free_pages(void *p, size_t len)
static void vm_free_pages(void *p, size_t len)
{
ACTUALLY_FREEING_PAGES(len);
LOGICALLY_FREEING_PAGES(len);
free(p);
}
static void flush_freed_pages(void)
static void vm_flush_freed_pages(void)
{
}
static void protect_pages(void *p, size_t len, int writeable)
static void vm_protect_pages(void *p, size_t len, int writeable)
{
if (len & (page_size - 1)) {
len += page_size - (len & (page_size - 1));
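The diff elides the body between the page rounding and the return, where the allocation itself happens via posix_memalign. A sketch of that core under the same dirty_ok contract (the page size is hard-coded here for brevity; the real code uses getpagesize()):

#include <stdlib.h>
#include <string.h>

/* Sketch of the memalign-based core: round up to a whole page, allocate
   aligned memory, zero unless the caller tolerates dirty pages. */
static void *memalign_core(size_t len, size_t alignment, int dirty_ok)
{
  void *r;
  size_t ps = 4096;               /* assumed page size */

  if (len & (ps - 1))             /* round up to a whole page */
    len += ps - (len & (ps - 1));
  if (alignment < ps)
    alignment = ps;               /* power of two, >= sizeof(void *) */
  if (posix_memalign(&r, alignment, len))
    return NULL;
  if (!dirty_ok)
    memset(r, 0, len);
  return r;
}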

View File

@@ -2,13 +2,10 @@
/*
static void os_vm_free_pages(void *p, size_t len);
static void *os_vm_alloc_pages(size_t len);
static void protect_pages(void *p, size_t len, int writeable);
static void vm_protect_pages(void *p, size_t len, int writeable);
*/
/* Requires: */
/* Optional:
CHECK_USED_AGAINST_MAX(len)
GCPRINT
GCOUTF
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
@@ -18,19 +15,7 @@ static void protect_pages(void *p, size_t len, int writeable);
#include <sys/mman.h>
#include <errno.h>
#ifndef GCPRINT
# define GCPRINT fprintf
# define GCOUTF stderr
#endif
#ifndef CHECK_USED_AGAINST_MAX
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
static long page_size;
#ifndef MAP_ANON
static int fd;
static int fd_created;
#endif
static void os_vm_free_pages(void *p, size_t len)
{
@@ -44,6 +29,9 @@ static void *os_vm_alloc_pages(size_t len)
void *r;
#ifndef MAP_ANON
static int fd;
static int fd_created;
if (!fd_created) {
fd_created = 1;
fd = open("/dev/zero", O_RDWR);
@@ -67,7 +55,7 @@ static void *os_vm_alloc_pages(size_t len)
}
static void protect_pages(void *p, size_t len, int writeable)
static void vm_protect_pages(void *p, size_t len, int writeable)
{
if (len & (page_size - 1)) {
len += page_size - (len & (page_size - 1));
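The fd/fd_created pair exists only for platforms without MAP_ANON, where the portable way to get anonymous memory is to map /dev/zero; this commit narrows those variables from file scope into os_vm_alloc_pages itself. The idiom as a standalone sketch (on failure mmap returns MAP_FAILED, i.e. (void *)-1, matching the checks above):

#include <fcntl.h>
#include <sys/mman.h>

/* Anonymous mapping with a /dev/zero fallback for pre-MAP_ANON systems. */
static void *anon_map(size_t len)
{
#ifdef MAP_ANON
  return mmap(NULL, len, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANON, -1, 0);
#else
  static int fd = -1;
  if (fd < 0)
    fd = open("/dev/zero", O_RDWR);  /* opened lazily, kept open */
  return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
}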

View File

@@ -3,68 +3,42 @@
allocator
determine_max_heap_size()
Requires:
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
CHECK_USED_AGAINST_MAX(len)
GCPRINT
GCOUTF
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
#include <oskit/c/malloc.h>
#ifndef GCPRINT
# define GCPRINT fprintf
# define GCOUTF stderr
#endif
#ifndef CHECK_USED_AGAINST_MAX
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
inline static void *malloc_dirty_pages(size_t len, size_t alignment)
inline static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
void *p;
CHECK_USED_AGAINST_MAX(len);
p = smemalign(alignment, len);
if (!dirty_ok)
memset(p, 0, len);
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
return p;
}
inline static void *malloc_pages(size_t len, size_t alignment)
{
void *p;
p = malloc_dirty_pages(len, alignment);
memset(p, 0, len);
return p;
}
static void free_pages(void *p, size_t len)
static void vm_free_pages(void *p, size_t len)
{
sfree(p, len);
LOGICALLY_FREEING_PAGES(len);
ACTUALLY_FREEING_PAGES(len);
}
static void flush_freed_pages(void)
static void vm_flush_freed_pages(void)
{
}
#ifndef DONT_NEED_MAX_HEAP_SIZE
typedef unsigned long size_type;
static size_type determine_max_heap_size(void)
static unsigned long determine_max_heap_size(void)
{
GCPRINT(GCOUTF,
"Don't know how to get heap size for OSKit: assuming 1GB\n");
GCPRINT(GCOUTF, "Don't know how to get heap size for OSKit: assuming 1GB\n");
return (1 * 1024 * 1024 * 1024);
}
#endif

View File

@@ -8,11 +8,7 @@
TEST = 0
GENERATIONS --- zero or non-zero
designate_modified --- when GENERATIONS is non-zero
my_qsort (for alloc_cache.c)
Optional:
CHECK_USED_AGAINST_MAX(len)
GCPRINT
GCOUTF
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
@@ -30,9 +26,12 @@
# include <pthread.h>
#endif
# if GENERATIONS
static int designate_modified(void *p);
# endif
# define TEST 0
#ifndef TEST
# define TEST 1
# include "my_qsort.c"
int designate_modified(void *p);
#endif
@@ -46,18 +45,6 @@ int designate_modified(void *p);
# define ARCH_THREAD_STATE_COUNT i386_THREAD_STATE_COUNT
#endif
#ifndef GCPRINT
# define GCPRINT fprintf
# define GCOUTF stderr
#endif
#ifndef CHECK_USED_AGAINST_MAX
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
/* Forward declarations: */
inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);
static void free_actual_pages(void *p, size_t len, int zeroed);
/* the structure of an exception msg and its reply */
typedef struct rep_msg {
mach_msg_header_t head;
@@ -357,11 +344,11 @@ char *big_page = NULL;
int designate_modified(void *p)
{
if((p >= normal_page) && (p < (normal_page + MPAGE_SIZE))) {
protect_pages(p, MPAGE_SIZE, 1);
vm_protect_pages(p, MPAGE_SIZE, 1);
return 1;
}
if((p >= big_page) && (p < (big_page + BPAGE_SIZE))) {
protect_pages(p, BPAGE_SIZE, 1);
vm_protect_pages(p, BPAGE_SIZE, 1);
return 1;
}
printf("Unrecognized write: %p\n", p);
@@ -372,14 +359,14 @@ int main(int argc, char **argv)
{
macosx_init_exception_handler();
printf("Allocating test pages:\n");
normal_page = malloc_pages(MPAGE_SIZE, MPAGE_SIZE);
normal_page = vm_malloc_pages(MPAGE_SIZE, MPAGE_SIZE, 0);
printf(" ... normal page at %p\n", normal_page);
big_page = malloc_pages(BPAGE_SIZE, MPAGE_SIZE);
big_page = vm_malloc_pages(BPAGE_SIZE, MPAGE_SIZE, 0);
printf(" ... big page at %p\n", big_page);
printf("Setting protection on test pages\n");
protect_pages(normal_page, MPAGE_SIZE, 0);
vm_protect_pages(normal_page, MPAGE_SIZE, 0);
printf(" ... normal page %p set\n", normal_page);
protect_pages(big_page, MPAGE_SIZE, 0);
vm_protect_pages(big_page, MPAGE_SIZE, 0);
printf(" ... big page %p set\n", big_page);
printf("Writing to test pages\n");
normal_page[2] = 'A';
@@ -388,9 +375,9 @@ int main(int argc, char **argv)
printf(" ... normal_page %p's second byte is %c\n", normal_page, normal_page[2]);
printf(" ... big_page %p's second byte is %c\n", big_page, big_page[2]);
printf("Freeing test pages:\n");
free_pages(normal_page, MPAGE_SIZE);
vm_free_pages(normal_page, MPAGE_SIZE);
printf(" ... freed normal page\n");
free_pages(big_page, MPAGE_SIZE);
vm_free_pages(big_page, MPAGE_SIZE);
printf(" ... freed big page\n");
}
#endif

View File

@@ -3,25 +3,12 @@
allocator
determine_max_heap_size()
Requires:
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
CHECK_USED_AGAINST_MAX(len)
GCPRINT
GCOUTF
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
#ifndef GCPRINT
# define GCPRINT fprintf
# define GCOUTF stderr
#endif
#ifndef CHECK_USED_AGAINST_MAX
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
/* Cache doesn't seem to help in Windows: */
#define CACHE_SLOTS 0
@@ -35,55 +22,50 @@ typedef struct {
static alloc_cache_entry cache[2][CACHE_SLOTS];
#endif
static void *malloc_pages(size_t len, size_t alignment)
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
CHECK_USED_AGAINST_MAX(len);
LOGICALLY_ALLOCATING_PAGES(len);
#if CACHE_SLOTS
{
int i, j;
for (j = 0; j < 2; j++) {
for (i = 0; i < CACHE_SLOTS; i++) {
if (cache[j][i].len == len) {
if (cache[j][i].page) {
void *result = cache[j][i].page;
cache[j][i].page = *(void **)result;
memset(result, 0, len);
return result;
}
break;
}
}
}
}
{
int i, j;
for (j = 0; j < 2; j++) {
for (i = 0; i < CACHE_SLOTS; i++) {
if (cache[j][i].len == len) {
if (cache[j][i].page) {
void *result = cache[j][i].page;
cache[j][i].page = *(void **)result;
memset(result, 0, len);
return result;
}
break;
}
}
}
}
#endif
ACTUALLY_ALLOCATING_PAGES(len);
/* VirtualAlloc MEM_COMMIT always zeros memory */
return (void *)VirtualAlloc(NULL, len,
MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
}
#define malloc_dirty_pages(size,align) malloc_pages(size,align)
static void free_pages(void *p, size_t len)
static void vm_free_pages(void *p, size_t len)
{
LOGICALLY_FREEING_PAGES(len);
#if CACHE_SLOTS
{
int i;
for (i = 0; i < CACHE_SLOTS; i++) {
if (!cache[0][i].len)
cache[0][i].len = len;
cache[0][i].len = len;
if (cache[0][i].len == len) {
*(void **)p = cache[0][i].page;
cache[0][i].page = p;
return;
*(void **)p = cache[0][i].page;
cache[0][i].page = p;
return;
}
}
}
@@ -94,7 +76,7 @@ static void free_pages(void *p, size_t len)
VirtualFree(p, 0, MEM_RELEASE);
}
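The cache threads its free list through the freed pages themselves: `*(void **)p = cache[0][i].page` stores the next-pointer in the first word of the page being freed, so the list needs no separate nodes. The push/pop pair in isolation (names hypothetical):

/* Intrusive free list: the first word of each cached page is the link. */
static void *free_list;            /* head of the list, or NULL */

static void push_page(void *p)
{
  *(void **)p = free_list;         /* old head stored in the page */
  free_list = p;
}

static void *pop_page(void)
{
  void *p = free_list;
  if (p)
    free_list = *(void **)p;       /* follow the link to the next page */
  return p;
}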
static void flush_freed_pages(void)
static void vm_flush_freed_pages(void)
{
#if CACHE_SLOTS
int i;
@@ -103,9 +85,9 @@ static void flush_freed_pages(void)
for (i = 0; i < CACHE_SLOTS; i++) {
if (cache[1][i].len) {
for (p = cache[1][i].page; p; p = next) {
next = *(void **)p;
ACTUALLY_FREEING_PAGES(cache[i].len);
VirtualFree(p, 0, MEM_RELEASE);
next = *(void **)p;
ACTUALLY_FREEING_PAGES(cache[1][i].len);
VirtualFree(p, 0, MEM_RELEASE);
}
}
cache[1][i].len = cache[0][i].len;
@@ -116,7 +98,7 @@ static void flush_freed_pages(void)
#endif
}
static void protect_pages(void *p, size_t len, int writeable)
static void vm_protect_pages(void *p, size_t len, int writeable)
{
DWORD old;
VirtualProtect(p, len, (writeable ? PAGE_READWRITE : PAGE_READONLY), &old);
@@ -126,28 +108,26 @@ static void protect_pages(void *p, size_t len, int writeable)
typedef unsigned long size_type;
typedef BOOL (WINAPI * QueryInformationJobObject_Proc)(HANDLE hJob,
JOBOBJECTINFOCLASS JobObjectInfoClass,
LPVOID lpJobObjectInfo,
DWORD cbJobObjectInfoLength,
LPDWORD lpReturnLength);
JOBOBJECTINFOCLASS JobObjectInfoClass,
LPVOID lpJobObjectInfo,
DWORD cbJobObjectInfoLength,
LPDWORD lpReturnLength);
static size_type determine_max_heap_size(void)
{
QueryInformationJobObject_Proc qijo;
JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;
HMODULE hm;
SYSTEM_INFO si;
hm = LoadLibrary("kernel32.dll");
if (hm)
qijo = (QueryInformationJobObject_Proc)GetProcAddress(hm, "QueryInformationJobObject");
else
qijo = NULL;
if (qijo) {
if (hm) {
DWORD size;
if (qijo(NULL, JobObjectExtendedLimitInformation, &info, sizeof(info), &size)) {
if (info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_PROCESS_MEMORY) {
return info.ProcessMemoryLimit;
QueryInformationJobObject_Proc qijo = NULL;
qijo = (QueryInformationJobObject_Proc)GetProcAddress(hm, "QueryInformationJobObject");
if (qijo) {
if (qijo(NULL, JobObjectExtendedLimitInformation, &info, sizeof(info), &size)) {
if (info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_PROCESS_MEMORY) {
return info.ProcessMemoryLimit;
}
}
}
}