add support for posix_memalign as the low-level allocator (because mmap doesn't work with glibc's malloc)

svn: r5516
Matthew Flatt 2007-01-31 05:05:28 +00:00
parent 81ce545d63
commit ebb8d0f39a
7 changed files with 196 additions and 104 deletions
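
The commit adds posix_memalign() as an alternative low-level source of GC pages: page-aligned blocks then come from the same heap that glibc's malloc manages, rather than from a private mmap() region, avoiding the interaction problem the commit message mentions. A minimal, self-contained sketch of the pattern (illustrative only; the real implementation is the new vm_memalign.c below, and the block size here is an arbitrary choice):

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
  void *page = NULL;
  size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
  size_t len = 4 * page_size;            /* request a few pages */

  /* posix_memalign() returns 0 on success; the alignment must be a
     power of two and a multiple of sizeof(void *) */
  if (posix_memalign(&page, len, len) != 0) {
    fprintf(stderr, "posix_memalign failed\n");
    return 1;
  }

  memset(page, 0, len);                   /* block is usable immediately */

  /* aligned, page-sized blocks can still be mprotect()ed, which the GC
     relies on for its write barrier */
  mprotect(page, len, PROT_READ);
  mprotect(page, len, PROT_READ | PROT_WRITE);

  free(page);                             /* returned to the malloc heap */
  return 0;
}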

View File

@@ -418,43 +418,7 @@ static int just_checking, the_size;
/******************************************************************************/
#define DONT_NEED_MAX_HEAP_SIZE
/******************************************************************************/
/* Windows */
#if _WIN32
# include "vm_win.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* OSKit */
#if OSKIT
# include "vm_osk.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* OS X */
#if defined(OS_X)
# if GENERATIONS
static int designate_modified(void *p);
# endif
# define TEST 0
# include "vm_osx.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* Default: mmap */
#ifndef MALLOCATOR_DEFINED
# include "vm_mmap.c"
#endif
#include "vm.c"
/******************************************************************************/
/* client setup */

View File

@@ -167,26 +167,7 @@ inline static void free_used_pages(size_t len)
#include "page_range.c"
#if _WIN32
# include "vm_win.c"
# define MALLOCATOR_DEFINED
#endif
#if defined(__APPLE__) && defined(__MACH__)
# define TEST 0
int designate_modified(void *p);
# include "vm_osx.c"
# define MALLOCATOR_DEFINED
#endif
#if OSKIT
# include "vm_osk.c"
# define MALLOCATOR_DEFINED
#endif
#ifndef MALLOCATOR_DEFINED
# include "vm_mmap.c"
#endif
#include "vm.c"
#include "protect_range.c"

View File

@@ -0,0 +1,22 @@
#ifndef DONT_NEED_MAX_HEAP_SIZE
# include <sys/time.h>
# include <sys/resource.h>
# include <unistd.h>
typedef unsigned long size_type;
static size_type determine_max_heap_size(void)
{
struct rlimit rlim;
# ifdef OS_X
getrlimit(RLIMIT_RSS, &rlim);
# else
getrlimit(RLIMIT_DATA, &rlim);
# endif
return (rlim.rlim_cur == RLIM_INFINITY) ? (1024 * 1024 * 1024) : rlim.rlim_cur;
}
#endif
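
determine_max_heap_size() caps the collector's heap at the process's data-segment resource limit (resident-set limit on OS X), falling back to 1 GB when the limit is unlimited. A self-contained way to see which cap would apply on a given machine (a sketch, not part of the commit):

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
  struct rlimit rlim;
  unsigned long cap;

  if (getrlimit(RLIMIT_DATA, &rlim) != 0)
    return 1;

  /* same fallback as determine_max_heap_size(): 1 GB when unlimited */
  cap = (rlim.rlim_cur == RLIM_INFINITY)
          ? (1024 * 1024 * 1024)
          : (unsigned long)rlim.rlim_cur;
  printf("effective max heap size: %lu bytes\n", cap);
  return 0;
}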

src/mzscheme/gc2/vm.c (new file, 41 lines)
View File

@@ -0,0 +1,41 @@
/******************************************************************************/
/* OS-specific low-level allocator */
/******************************************************************************/
/******************************************************************************/
/* Windows */
#if _WIN32
# include "vm_win.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* OSKit */
#if OSKIT
# include "vm_osk.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* OS X */
#if defined(OS_X)
# if GENERATIONS
static int designate_modified(void *p);
# endif
# define TEST 0
# include "vm_osx.c"
# define MALLOCATOR_DEFINED
#endif
/******************************************************************************/
/* Default: mmap */
#ifndef MALLOCATOR_DEFINED
# include "vm_mmap.c"
#endif

View File

@@ -0,0 +1,73 @@
/*
Provides:
posix_memalign-based allocator (uses alloc_cache.c)
determine_max_heap_size() (uses rlimit_heapsize.c)
Requires:
my_qsort (for alloc_cache.c)
LOGICALLY_ALLOCATING_PAGES(len)
ACTUALLY_ALLOCATING_PAGES(len)
LOGICALLY_FREEING_PAGES(len)
ACTUALLY_FREEING_PAGES(len)
Optional:
CHECK_USED_AGAINST_MAX(len)
DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
*/
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#ifndef CHECK_USED_AGAINST_MAX
# define CHECK_USED_AGAINST_MAX(x) /* empty */
#endif
static int page_size; /* OS page size */
static void *malloc_pages(size_t len, size_t alignment)
{
void *r;
size_t extra = 0;
if (!page_size)
page_size = getpagesize();
CHECK_USED_AGAINST_MAX(len);
/* Round up to nearest page: */
if (len & (page_size - 1))
len += page_size - (len & (page_size - 1));
/* Something from the cache, perhaps? */
r = find_cached_pages(len, alignment);
if (r)
return r;
if (posix_memalign(&r, alignment, len))
return NULL;
ACTUALLY_ALLOCATING_PAGES(len);
LOGICALLY_ALLOCATING_PAGES(len);
return r;
}
static void system_free_pages(void *p, size_t len)
{
free(p);
}
static void protect_pages(void *p, size_t len, int writeable)
{
if (len & (page_size - 1)) {
len += page_size - (len & (page_size - 1));
}
mprotect(p, len, (writeable ? (PROT_READ | PROT_WRITE) : PROT_READ));
}
# include "alloc_cache.c"
/*************************************************************/
# include "rlimit_heapsize.c"

View File

@@ -136,25 +136,4 @@ static void protect_pages(void *p, size_t len, int writeable)
/*************************************************************/
#ifndef DONT_NEED_MAX_HEAP_SIZE
# include <sys/time.h>
# include <sys/resource.h>
# include <unistd.h>
typedef unsigned long size_type;
static size_type determine_max_heap_size(void)
{
struct rlimit rlim;
# ifdef OS_X
getrlimit(RLIMIT_RSS, &rlim);
# else
getrlimit(RLIMIT_DATA, &rlim);
# endif
return (rlim.rlim_cur == RLIM_INFINITY) ? (1024 * 1024 * 1024) : rlim.rlim_cur;
}
#endif
# include "rlimit_heapsize.c"

View File

@@ -5349,6 +5349,61 @@ void scheme_pop_continuation_frame(Scheme_Cont_Frame_Data *d)
MZ_CONT_MARK_STACK = d->cont_mark_stack;
}
static MZ_MARK_STACK_TYPE clone_meta_cont_set_mark(Scheme_Meta_Continuation *mc, Scheme_Object *val, long findpos)
{
/* Clone the meta-continuation, in case it was captured by
a continuation in its current state. */
Scheme_Meta_Continuation *naya;
Scheme_Cont_Mark *cp;
naya = MALLOC_ONE_RT(Scheme_Meta_Continuation);
memcpy(naya, mc, sizeof(Scheme_Meta_Continuation));
cp = MALLOC_N(Scheme_Cont_Mark, naya->cont_mark_shareable);
memcpy(cp, mc->cont_mark_stack_copied, naya->cont_mark_shareable * sizeof(Scheme_Cont_Mark));
naya->cont_mark_stack_copied = cp;
naya->copy_after_captured = scheme_cont_capture_count;
mc = naya;
scheme_current_thread->meta_continuation = mc;
mc->cont_mark_stack_copied[findpos].val = val;
mc->cont_mark_stack_copied[findpos].cache = NULL;
return 0;
}
static MZ_MARK_STACK_TYPE new_segment_set_mark(long segpos, long pos, Scheme_Object *key, Scheme_Object *val)
{
Scheme_Thread *p = scheme_current_thread;
Scheme_Cont_Mark *cm = NULL;
int c = p->cont_mark_seg_count;
Scheme_Cont_Mark **segs, *seg;
long findpos;
/* Note: we perform allocations before changing p to avoid GC trouble,
since MzScheme adjusts a thread's cont_mark_stack_segments on GC. */
segs = MALLOC_N(Scheme_Cont_Mark *, c + 1);
seg = scheme_malloc_allow_interior(sizeof(Scheme_Cont_Mark) * SCHEME_MARK_SEGMENT_SIZE);
segs[c] = seg;
memcpy(segs, p->cont_mark_stack_segments, c * sizeof(Scheme_Cont_Mark *));
p->cont_mark_seg_count++;
p->cont_mark_stack_segments = segs;
seg = p->cont_mark_stack_segments[segpos];
cm = seg + pos;
findpos = MZ_CONT_MARK_STACK;
MZ_CONT_MARK_STACK++;
cm->key = key;
cm->val = val;
cm->pos = MZ_CONT_MARK_POS; /* always odd */
cm->cache = NULL;
return findpos;
}
MZ_MARK_STACK_TYPE scheme_set_cont_mark(Scheme_Object *key, Scheme_Object *val)
{
Scheme_Thread *p = scheme_current_thread;
@@ -5387,18 +5442,7 @@ MZ_MARK_STACK_TYPE scheme_set_cont_mark(Scheme_Object *key, Scheme_Object *val)
break;
if (mc->cont_mark_stack_copied[findpos].key == key) {
if (mc->copy_after_captured < scheme_cont_capture_count) {
/* Clone the meta-continuation, in case it was captured by
a continuation in its current state. */
Scheme_Meta_Continuation *naya;
Scheme_Cont_Mark *cp;
naya = MALLOC_ONE_RT(Scheme_Meta_Continuation);
memcpy(naya, mc, sizeof(Scheme_Meta_Continuation));
cp = MALLOC_N(Scheme_Cont_Mark, naya->cont_mark_shareable);
memcpy(cp, mc->cont_mark_stack_copied, naya->cont_mark_shareable * sizeof(Scheme_Cont_Mark));
naya->cont_mark_stack_copied = cp;
naya->copy_after_captured = scheme_cont_capture_count;
mc = naya;
p->meta_continuation = mc;
return clone_meta_cont_set_mark(mc, val, findpos);
}
mc->cont_mark_stack_copied[findpos].val = val;
mc->cont_mark_stack_copied[findpos].cache = NULL;
@@ -5422,19 +5466,7 @@ MZ_MARK_STACK_TYPE scheme_set_cont_mark(Scheme_Object *key, Scheme_Object *val)
if (segpos >= p->cont_mark_seg_count) {
/* Need a new segment */
int c = p->cont_mark_seg_count;
Scheme_Cont_Mark **segs, *seg;
/* Note: we perform allocations before changing p to avoid GC trouble,
since MzScheme adjusts a thread's cont_mark_stack_segments on GC. */
segs = MALLOC_N(Scheme_Cont_Mark *, c + 1);
seg = scheme_malloc_allow_interior(sizeof(Scheme_Cont_Mark) * SCHEME_MARK_SEGMENT_SIZE);
segs[c] = seg;
memcpy(segs, p->cont_mark_stack_segments, c * sizeof(Scheme_Cont_Mark *));
p->cont_mark_seg_count++;
p->cont_mark_stack_segments = segs;
return new_segment_set_mark(segpos, pos, key, val);
}
seg = p->cont_mark_stack_segments[segpos];