scheme_malloc_code and scheme_free_code
svn: r11958
parent 8217dddd56
commit 06efa91709
@@ -674,6 +674,17 @@ using @cpp{scheme_free_immobile_box}.}
 
 Frees an immobile box allocated with @cpp{scheme_malloc_immobile_box}.}
 
+@function[(void* scheme_malloc_code [long size])]{
+
+Allocates non-collectable memory to hold executable machine code. Use
+this function instead of @cpp{malloc} to ensure that the allocated
+memory has ``execute'' permissions. Use @cpp{scheme_free_code} to free
+memory allocated by this function.}
+
+@function[(void scheme_free_code [void* p])]{
+
+Frees memory allocated with @cpp{scheme_malloc_code}.}
+
 @function[(void scheme_register_extension_global
 [void* ptr]
 [long size])]{

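As a usage illustration only (not part of this commit, and assuming an extension built against escheme.h), the documented pair might be used like this:

/* Copy generated machine code into an executable buffer. */
#include "escheme.h"
#include <string.h>

static void *install_stub(const unsigned char *stub, long len)
{
  void *code = scheme_malloc_code(len);  /* writable and executable, unlike malloc() */
  memcpy(code, stub, len);
  return code;  /* release later with scheme_free_code(code) */
}
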
@@ -66,7 +66,7 @@ picts. The functions @scheme[pict-width], @scheme[pict-height],
 information from a pict.
 
 
-@defstruct[pict ([draw ((is-a?/c dc<%>) real? real? . -> . any)]
+@defstruct[pict ([draw any/c]
 [width real?]
 [height real?]
 [ascent real?]

@@ -79,16 +79,18 @@ A @scheme[pict] structure is normally not created directly with
 @scheme[make-pict]. Instead, functions like @scheme[text],
 @scheme[hline], and @scheme[dc] are used to construct a pict.
 
-The @scheme[draw] field contains the pict's drawing procedure, which
+The @scheme[draw] field contains the pict's drawing information in an
+internal format. Roughly, the drawing information is a procedure that
 takes a @scheme[dc<%>] drawing context and an offset for the pict's
 top-left corner (i.e., it's bounding box's top left corner relative to
 the @scheme[dc<%>] origin). The state of the @scheme[dc<%>] is
 intended to affect the pict's drawing; for example, the pen and brush
 will be set for a suitable default drawing mode, and the
-@scheme[dc<%>] scale will be set to scale the resulting image.
+@scheme[dc<%>] scale will be set to scale the resulting image. Use
+@scheme[draw-pict] (as opposed to @scheme[pict-draw]) to draw the
+picture.
 
-The @scheme[panbox] field is internal, and it should be ininitialized
-to @scheme[#f].
+The @scheme[panbox] field is internal and initialized to @scheme[#f].
 
 The @scheme[last] field indicates a pict within the @scheme[children]
 list (transitively) that can be treated as the last element of the

@@ -2529,45 +2529,7 @@ void free_cl_cif_args(void *ignored, void *p)
 #ifdef MZ_PRECISE_GC
   GC_free_immobile_box((void**)(((closure_and_cif*)p)->data));
 #endif
-  free(p);
-}
-
-/* This is a temporary hack to allocate a piece of executable memory, */
-/* it should be removed when mzscheme's core will include a similar function */
-#ifndef WINDOWS_DYNAMIC_LOAD
-#include <sys/mman.h>
-#endif
-void *malloc_exec(size_t size) {
-  static long pagesize = -1;
-  void *p, *pp;
-  if (pagesize == -1) {
-#ifndef WINDOWS_DYNAMIC_LOAD
-    pagesize = getpagesize();
-#else
-    {
-      SYSTEM_INFO info;
-      GetSystemInfo(&info);
-      pagesize = info.dwPageSize;
-    }
-#endif
-  }
-  p = malloc(size);
-  if (p == NULL)
-    scheme_signal_error("internal error: malloc failed (malloc_exec)");
-  /* set pp to the beginning of the page */
-  pp = (void*)(((long)p) & ~(pagesize-1));
-  /* set size to a pagesize multiple, in case the block is more than a page */
-  size = ((((long)p)+size+pagesize-1) & ~(pagesize-1)) - ((long)pp);
-#ifndef WINDOWS_DYNAMIC_LOAD
-  if (mprotect(pp, size, PROT_READ|PROT_WRITE|PROT_EXEC))
-    perror("malloc_exec mprotect failure");
-#else
-  {
-    DWORD old;
-    VirtualProtect(pp, size, PAGE_EXECUTE_READWRITE, &old);
-  }
-#endif
-  return p;
+  scheme_free_code(p);
 }
 
 /* (ffi-callback scheme-proc in-types out-type [abi]) -> ffi-callback */

@@ -2626,7 +2588,7 @@ static Scheme_Object *foreign_ffi_callback(int argc, Scheme_Object *argv[])
   rtype = CTYPE_PRIMTYPE(base);
   abi = GET_ABI(MYNAME,3);
   /* malloc space for everything needed, so a single free gets rid of this */
-  cl_cif_args = malloc_exec(sizeof(closure_and_cif) + nargs*sizeof(ffi_cif*));
+  cl_cif_args = scheme_malloc_code(sizeof(closure_and_cif) + nargs*sizeof(ffi_cif*));
   cl = &(cl_cif_args->closure); /* cl is the same as cl_cif_args */
   cif = &(cl_cif_args->cif);
   atypes = (ffi_type **)(((char*)cl_cif_args) + sizeof(closure_and_cif));

@@ -1969,7 +1969,7 @@ void free_cl_cif_args(void *ignored, void *p)
 #ifdef MZ_PRECISE_GC
   GC_free_immobile_box((void**)(((closure_and_cif*)p)->data));
 #endif
-  free(p);
+  scheme_free_code(p);
 }
 
 /* (ffi-callback scheme-proc in-types out-type [abi]) -> ffi-callback */

@@ -2026,7 +2026,7 @@ void free_cl_cif_args(void *ignored, void *p)
   rtype = CTYPE_PRIMTYPE(base);
   abi = GET_ABI(MYNAME,3);
   /* malloc space for everything needed, so a single free gets rid of this */
-  cl_cif_args = malloc(sizeof(closure_and_cif) + nargs*sizeof(ffi_cif*));
+  cl_cif_args = scheme_malloc_code(sizeof(closure_and_cif) + nargs*sizeof(ffi_cif*));
   cl = &(cl_cif_args->closure); /* cl is the same as cl_cif_args */
   cif = &(cl_cif_args->cif);
   atypes = (ffi_type **)(((char*)cl_cif_args) + sizeof(closure_and_cif));

@@ -32,7 +32,7 @@
 static int page_size; /* OS page size */
 
 #ifndef MAP_ANON
-int fd, fd_created;
+static int fd, fd_created;
 #endif
 
 inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);

@@ -172,6 +172,9 @@ GC_malloc
 GC_malloc_atomic
 GC_malloc_stubborn
 GC_malloc_uncollectable
+scheme_malloc_code
+scheme_free_code
+scheme_malloc_gcable_code
 scheme_malloc_eternal
 scheme_end_stubborn_change
 scheme_calloc

@@ -177,6 +177,9 @@ GC_malloc_array_tagged
 GC_malloc_allow_interior
 GC_malloc_atomic_allow_interior
 GC_malloc_tagged_allow_interior
+scheme_malloc_code
+scheme_free_code
+scheme_malloc_gcable_code
 scheme_malloc_eternal
 scheme_end_stubborn_change
 scheme_calloc

@@ -160,6 +160,9 @@ EXPORTS
 scheme_eval_compiled_sized_string
 scheme_eval_compiled_sized_string_with_magic
 scheme_detach_multple_array
+scheme_malloc_code
+scheme_free_code
+scheme_malloc_gcable_code
 scheme_malloc_eternal
 scheme_end_stubborn_change
 scheme_calloc

@@ -169,6 +169,9 @@ EXPORTS
 GC_malloc_allow_interior
 GC_malloc_atomic_allow_interior
 GC_malloc_tagged_allow_interior
+scheme_malloc_code
+scheme_free_code
+scheme_malloc_gcable_code
 scheme_malloc_eternal
 scheme_end_stubborn_change
 scheme_calloc

@@ -1749,8 +1749,8 @@ MZ_EXTERN void scheme_wake_up(void);
 MZ_EXTERN int scheme_get_external_event_fd(void);
 
 /* GC registration: */
-MZ_EXTERN void scheme_set_primordial_stack_base(void *base, int no_auto_statics);
-MZ_EXTERN void scheme_set_primordial_stack_bounds(void *base, void *deepest, int no_auto_statics);
+MZ_EXTERN void scheme_set_stack_base(void *base, int no_auto_statics);
+MZ_EXTERN void scheme_set_stack_bounds(void *base, void *deepest, int no_auto_statics);
 
 /* Stack-preparation start-up: */
 typedef int (*Scheme_Nested_Main)(void *data);

@@ -695,6 +695,7 @@
 #else
 # define MZ_USE_JIT_I386
 #endif
+# define MZ_JIT_USE_MPROTECT
 
 # define FLAGS_ALREADY_SET
 

@@ -396,10 +396,10 @@ static void *generate_one(mz_jit_state *old_jitter,
   padding = 0;
   if (gcable) {
 #ifdef MZ_PRECISE_GC
-    buffer = malloc(size);
+    buffer = scheme_malloc_code(size);
     scheme_jit_malloced += size_pre_retained;
 #else
-    buffer = scheme_malloc(size);
+    buffer = scheme_malloc_gcable_code(size);
 #endif
   } else {
     buffer = malloc(size);

@@ -7880,7 +7880,7 @@ static void release_native_code(void *fnlized, void *p)
   /* Remove name mapping: */
   add_symbol((unsigned long)p, (unsigned long)p + SCHEME_INT_VAL(len), NULL, 0);
   /* Free memory: */
-  free(p);
+  scheme_free_code(p);
   jit_notify_freed_code();
 }
 #endif

@@ -34,87 +34,14 @@
 #ifndef __lightning_funcs_h
 #define __lightning_funcs_h
 
-#ifdef MZ_JIT_USE_MPROTECT
-# include <unistd.h>
-# include <sys/mman.h>
-#endif
-#ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
-# include <windows.h>
-#endif
-
-#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
-static unsigned long jit_prev_page = 0, jit_prev_length = 0;
-#endif
-
 static void
 jit_notify_freed_code(void)
 {
-#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
-  jit_prev_page = jit_prev_length = 0;
-#endif
 }
 
 static void
 jit_flush_code(void *dest, void *end)
 {
-  /* On the x86, the PROT_EXEC bits are not handled by the MMU.
-     However, the kernel can emulate this by setting the code
-     segment's limit to the end address of the highest page
-     whose PROT_EXEC bit is set.
-
-     Linux kernels that do so and that disable by default the
-     execution of the data and stack segment are becoming more
-     and more common (Fedora, for example), so we implement our
-     jit_flush_code as an mprotect. */
-#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
-  unsigned long page, length;
-# ifdef PAGESIZE
-  const long page_size = PAGESIZE;
-# else
-  static unsigned long page_size = -1;
-  if (page_size == -1) {
-# ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
-    SYSTEM_INFO info;
-    GetSystemInfo(&info);
-    page_size = info.dwPageSize;
-# else
-    page_size = sysconf (_SC_PAGESIZE);
-# endif
-  }
-# endif
-
-  page = (long) dest & ~(page_size - 1);
-  length = ((char *) end - (char *) page + page_size - 1) & ~(page_size - 1);
-
-  /* Simple-minded attempt at optimizing the common case where a single
-     chunk of memory is used to compile multiple functions. */
-  if (page >= jit_prev_page && page + length <= jit_prev_page + jit_prev_length)
-    return;
-
-# ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
-  {
-    DWORD old;
-    VirtualProtect((void *)page, length, PAGE_EXECUTE_READWRITE, &old);
-  }
-# else
-  mprotect ((void *) page, length, PROT_READ | PROT_WRITE | PROT_EXEC);
-# endif
-
-  /* See if we can extend the previously mprotect'ed memory area towards
-     higher addresses: the starting address remains the same as before. */
-  if (page >= jit_prev_page && page <= jit_prev_page + jit_prev_length)
-    jit_prev_length = page + length - jit_prev_page;
-
-  /* See if we can extend the previously mprotect'ed memory area towards
-     lower addresses: the highest address remains the same as before. */
-  else if (page < jit_prev_page && page + length >= jit_prev_page
-           && page + length <= jit_prev_page + jit_prev_length)
-    jit_prev_length += jit_prev_page - page, jit_prev_page = page;
-
-  /* Nothing to do, replace the area. */
-  else
-    jit_prev_page = page, jit_prev_length = length;
-#endif
 }
 
 #endif /* __lightning_funcs_h */

@@ -39,6 +39,14 @@
 # define MALLOC malloc
 #endif
 
+#ifdef MZ_JIT_USE_MPROTECT
+# include <unistd.h>
+# include <sys/mman.h>
+#endif
+#ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
+# include <windows.h>
+#endif
+
 static void **dgc_array;
 static int *dgc_count;
 static int dgc_size;

@@ -57,12 +65,16 @@ extern MZ_DLLIMPORT void GC_register_late_disappearing_link(void **link, void *o
 
 static int use_registered_statics;
 
+/************************************************************************/
+/*                             stack setup                              */
+/************************************************************************/
+
 #if !defined(MZ_PRECISE_GC) && !defined(USE_SENORA_GC)
 extern MZ_DLLIMPORT void GC_init();
 extern MZ_DLLIMPORT unsigned long GC_get_stack_base();
 #endif
 
-void scheme_set_primordial_stack_base(void *base, int no_auto_statics)
+void scheme_set_stack_base(void *base, int no_auto_statics)
 {
 #ifdef MZ_PRECISE_GC
   GC_init_type_tags(_scheme_last_type_,

@@ -132,7 +144,7 @@ int scheme_main_stack_setup(int no_auto_statics, Scheme_Nested_Main _main, void
   void *stack_start;
   int volatile return_code;
 
-  scheme_set_primordial_stack_base(PROMPT_STACK(stack_start), no_auto_statics);
+  scheme_set_stack_base(PROMPT_STACK(stack_start), no_auto_statics);
 
   return_code = _main(data);
 

@@ -144,9 +156,9 @@ int scheme_main_stack_setup(int no_auto_statics, Scheme_Nested_Main _main, void
   return return_code;
 }
 
-void scheme_set_primordial_stack_bounds(void *base, void *deepest, int no_auto_statics)
+void scheme_set_stack_bounds(void *base, void *deepest, int no_auto_statics)
 {
-  scheme_set_primordial_stack_base(base, no_auto_statics);
+  scheme_set_stack_base(base, no_auto_statics);
 
 #ifdef USE_STACK_BOUNDARY_VAR
   if (deepest) {

@@ -165,6 +177,9 @@ extern unsigned long scheme_get_stack_base()
   return (unsigned long)GC_get_stack_base();
 }
 
+/************************************************************************/
+/*                            memory utils                              */
+/************************************************************************/
 
 void scheme_dont_gc_ptr(void *p)
 {

@@ -286,6 +301,10 @@ scheme_strdup_eternal(const char *str)
   return naya;
 }
 
+/************************************************************************/
+/*                                cptr                                  */
+/************************************************************************/
+
 Scheme_Object *scheme_make_cptr(void *cptr, Scheme_Object *typetag)
 {
   Scheme_Object *o;

@@ -311,6 +330,10 @@ Scheme_Object *scheme_make_offset_cptr(void *cptr, long offset, Scheme_Object *t
   return o;
 }
 
+/************************************************************************/
+/*                             allocation                               */
+/************************************************************************/
+
 #ifndef MZ_PRECISE_GC
 static Scheme_Hash_Table *immobiles;
 #endif

@@ -531,6 +554,396 @@ void *scheme_malloc_uncollectable_tagged(size_t s)
 
 #endif
 
+/************************************************************************/
+/*                           code allocation                            */
+/************************************************************************/
+
+/* We're not supposed to use mprotect() or VirtualProtect() on memory
+   from malloc(); Posix says that mprotect() only works on memory from
+   mmap(), and VirtualProtect() similarly requires alignment with a
+   corresponding VirtualAlloc. So we implement a little allocator here
+   for code chunks. */
+
+#ifdef MZ_PRECISE_GC
+START_XFORM_SKIP;
+#endif
+
+/* Max of desired alignment and 2 * sizeof(long): */
+#define CODE_HEADER_SIZE 16
+
+long scheme_code_page_total;
+
+#ifndef MAP_ANON
+static int fd, fd_created;
+#endif
+
+#define LOG_CODE_MALLOC(lvl, s) /* if (lvl > 1) s */
+#define CODE_PAGE_OF(p) ((void *)(((unsigned long)p) & ~(page_size - 1)))
+
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+
+struct free_list_entry {
+  long size; /* size of elements in this bucket */
+  void *elems; /* doubly linked list for free blocks */
+  int count; /* number of items in `elems' */
+};
+
+static struct free_list_entry *free_list;
+static int free_list_bucket_count;
+
+static long get_page_size()
+{
+# ifdef PAGESIZE
+  const long page_size = PAGESIZE;
+# else
+  static unsigned long page_size = -1;
+  if (page_size == -1) {
+# ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    page_size = info.dwPageSize;
+# else
+    page_size = sysconf (_SC_PAGESIZE);
+# endif
+  }
+# endif
+
+  return page_size;
+}
+
+static void *malloc_page(long size)
+{
+  void *r;
+
+#ifndef MAP_ANON
+  if (!fd_created) {
+    fd_created = 1;
+    fd = open("/dev/zero", O_RDWR);
+  }
+#endif
+
+#ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
+  {
+    DWORD old;
+    r = (void *)VirtualAlloc(NULL, size,
+                             MEM_COMMIT | MEM_RESERVE,
+                             /* A note in gc/os_dep.c says that VirtualAlloc
+                                doesn't like PAGE_EXECUTE_READWRITE. In case
+                                that's true, we use a separate VirtualProtect step. */
+                             PAGE_READWRITE);
+    if (r)
+      VirtualProtect(r, size, PAGE_EXECUTE_READWRITE, &old);
+  }
+#else
+# ifdef MAP_ANON
+  r = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
+# else
+  r = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, 0);
+# endif
+  if (r == (void *)-1)
+    r = NULL;
+#endif
+
+  if (!r)
+    scheme_raise_out_of_memory(NULL, NULL);
+
+  return r;
+}
+
+static void free_page(void *p, long size)
+{
+#ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
+  VirtualFree(p, 0, MEM_RELEASE);
+#else
+  munmap(p, size);
+#endif
+}
+#endif
+
+static void init_free_list()
+{
+  long page_size = get_page_size();
+  int pos = 0;
+  int cnt = 2;
+  long last_v = page_size, v;
+
+  /* Compute size that fits 2 objects per page, then 3 per page, etc.
+     Keeping CODE_HEADER_SIZE alignment gives us a small number of
+     buckets. */
+  while (1) {
+    v = (page_size - CODE_HEADER_SIZE) / cnt;
+    v = (v / CODE_HEADER_SIZE) * CODE_HEADER_SIZE;
+    if (v != last_v) {
+      free_list[pos].size = v;
+      free_list[pos].elems = NULL;
+      free_list[pos].count = 0;
+      last_v = v;
+      pos++;
+      if (v == CODE_HEADER_SIZE)
+        break;
+    }
+    cnt++;
+  }
+
+  free_list_bucket_count = pos;
+}
+
+static long free_list_find_bucket(long size)
+{
+  /* binary search */
+  int lo = 0, hi = free_list_bucket_count - 1, mid;
+
+  while (lo + 1 < hi) {
+    mid = (lo + hi) / 2;
+    if (free_list[mid].size > size) {
+      lo = mid;
+    } else {
+      hi = mid;
+    }
+  }
+
+  if (free_list[hi].size == size)
+    return hi;
+  else
+    return lo;
+}
+
+void *scheme_malloc_code(long size)
+{
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+  long size2, bucket, sz, page_size;
+  void *p, *pg, *prev;
+
+  if (size < CODE_HEADER_SIZE) {
+    /* ensure CODE_HEADER_SIZE alignment
+       and room for free-list pointers */
+    size = CODE_HEADER_SIZE;
+  }
+
+  page_size = get_page_size();
+
+  if ((2 * size + CODE_HEADER_SIZE) > page_size) {
+    /* allocate large object on its own page(s) */
+    sz = size + CODE_HEADER_SIZE;
+    sz = (sz + page_size - 1) & ~(page_size - 1);
+    pg = malloc_page(sz);
+    scheme_code_page_total += sz;
+    *(long *)pg = sz;
+    LOG_CODE_MALLOC(1, printf("allocated large %p (%ld) [now %ld]\n",
+                              pg, size + CODE_HEADER_SIZE, scheme_code_page_total));
+    return ((char *)pg) + CODE_HEADER_SIZE;
+  }
+
+  if (!free_list) {
+    free_list = (struct free_list_entry *)malloc_page(page_size);
+    scheme_code_page_total += page_size;
+    init_free_list();
+  }
+
+  bucket = free_list_find_bucket(size);
+  size2 = free_list[bucket].size;
+
+  if (!free_list[bucket].elems) {
+    /* add a new page's worth of items to the free list */
+    int i, count = 0;
+    pg = malloc_page(page_size);
+    scheme_code_page_total += page_size;
+    LOG_CODE_MALLOC(2, printf("new page for %ld / %ld at %p [now %ld]\n",
+                              size2, bucket, pg, scheme_code_page_total));
+    sz = page_size - size2;
+    for (i = CODE_HEADER_SIZE; i <= sz; i += size2) {
+      p = ((char *)pg) + i;
+      prev = free_list[bucket].elems;
+      ((void **)p)[0] = prev;
+      ((void **)p)[1] = NULL;
+      if (prev)
+        ((void **)prev)[1] = p;
+      free_list[bucket].elems = p;
+      count++;
+    }
+    ((long *)pg)[0] = bucket; /* first long of page indicates bucket */
+    ((long *)pg)[1] = 0; /* second long indicates number of allocated on page */
+    free_list[bucket].count = count;
+  }
+
+  p = free_list[bucket].elems;
+  prev = ((void **)p)[0];
+  free_list[bucket].elems = prev;
+  --free_list[bucket].count;
+  if (prev)
+    ((void **)prev)[1] = NULL;
+  ((long *)CODE_PAGE_OF(p))[1] += 1;
+
+  LOG_CODE_MALLOC(0, printf("allocated %ld (->%ld / %ld)\n", size, size2, bucket));
+
+  return p;
+#else
+  return malloc(size); /* good luck! */
+#endif
+}
+
+void scheme_free_code(void *p)
+{
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+  long size, size2, bucket, page_size;
+  int per_page, n;
+  void *prev;
+
+  page_size = get_page_size();
+
+  size = *(long *)CODE_PAGE_OF(p);
+
+  if (size >= page_size) {
+    /* it was a large object on its own page(s) */
+    scheme_code_page_total -= size;
+    LOG_CODE_MALLOC(1, printf("freeing large %p (%ld) [%ld left]\n",
+                              p, size, scheme_code_page_total));
+    free_page((char *)p - CODE_HEADER_SIZE, size);
+    return;
+  }
+
+  bucket = size;
+
+  if ((bucket < 0) || (bucket >= free_list_bucket_count)) {
+    printf("bad free: %p\n", (char *)p + CODE_HEADER_SIZE);
+    abort();
+  }
+
+  size2 = free_list[bucket].size;
+
+  LOG_CODE_MALLOC(0, printf("freeing %ld / %ld\n", size2, bucket));
+
+  /* decrement alloc count for this page: */
+  per_page = (page_size - CODE_HEADER_SIZE) / size2;
+  n = ((long *)CODE_PAGE_OF(p))[1];
+  /* double-check: */
+  if ((n < 1) || (n > per_page)) {
+    printf("bad free: %p\n", (char *)p + CODE_HEADER_SIZE);
+    abort();
+  }
+  n--;
+  ((long *)CODE_PAGE_OF(p))[1] = n;
+
+  /* add to free list: */
+  prev = free_list[bucket].elems;
+  ((void **)p)[0] = prev;
+  ((void **)p)[1] = NULL;
+  if (prev)
+    ((void **)prev)[1] = p;
+  free_list[bucket].elems = p;
+  free_list[bucket].count++;
+
+  /* Free whole page if it's completely on the free list, and if there
+     are enough buckets on other pages. */
+  if ((n == 0) && ((free_list[bucket].count - per_page) >= (per_page / 2))) {
+    /* remove same-page elements from free list, then free page */
+    int i;
+    long sz;
+    void *pg;
+
+    sz = page_size - size2;
+    pg = CODE_PAGE_OF(p);
+    for (i = CODE_HEADER_SIZE; i <= sz; i += size2) {
+      p = ((char *)pg) + i;
+      prev = ((void **)p)[1];
+      if (prev)
+        ((void **)prev)[0] = ((void **)p)[0];
+      else
+        free_list[bucket].elems = ((void **)p)[0];
+      prev = ((void **)p)[0];
+      if (prev)
+        ((void **)prev)[1] = ((void **)p)[1];
+      --free_list[bucket].count;
+    }
+
+    scheme_code_page_total -= page_size;
+    LOG_CODE_MALLOC(2, printf("freeing page at %p [%ld left]\n",
+                              CODE_PAGE_OF(p), scheme_code_page_total));
+    free_page(CODE_PAGE_OF(p), page_size);
+  }
+#else
+  free(p);
+#endif
+}
+
+#ifndef MZ_PRECISE_GC
+
+/* When using the CGC allocator, we know how GCable memory is
+   allocated, and we expect mprotect(), etc., to work on it. The JIT
+   currently takes advantage of that combination, so we support it
+   with scheme_malloc_gcable_code() --- but only in CGC mode. */
+
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+static unsigned long jit_prev_page = 0, jit_prev_length = 0;
+#endif
+
+void *scheme_malloc_gcable_code(long size)
+{
+  void *p;
+  p = scheme_malloc(size);
+
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+  {
+    /* [This chunk of code moved from our copy of GNU lightning to here.] */
+    unsigned long page, length, page_size;
+    void *end;
+
+    page_size = get_page_size();
+
+    end = ((char *)p) + size;
+
+    page = (long) p & ~(page_size - 1);
+    length = ((char *) end - (char *) page + page_size - 1) & ~(page_size - 1);
+
+    /* Simple-minded attempt at optimizing the common case where a single
+       chunk of memory is used to compile multiple functions. */
+    if (!(page >= jit_prev_page && page + length <= jit_prev_page + jit_prev_length)) {
+
+# ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
+      {
+        DWORD old;
+        VirtualProtect((void *)page, length, PAGE_EXECUTE_READWRITE, &old);
+      }
+# else
+      mprotect ((void *) page, length, PROT_READ | PROT_WRITE | PROT_EXEC);
+# endif
+
+      /* See if we can extend the previously mprotect'ed memory area towards
+         higher addresses: the starting address remains the same as before. */
+      if (page >= jit_prev_page && page <= jit_prev_page + jit_prev_length)
+        jit_prev_length = page + length - jit_prev_page;
+
+      /* See if we can extend the previously mprotect'ed memory area towards
+         lower addresses: the highest address remains the same as before. */
+      else if (page < jit_prev_page && page + length >= jit_prev_page
+               && page + length <= jit_prev_page + jit_prev_length)
+        jit_prev_length += jit_prev_page - page, jit_prev_page = page;
+
+      /* Nothing to do, replace the area. */
+      else
+        jit_prev_page = page, jit_prev_length = length;
+    }
+  }
+#endif
+
+  return p;
+}
+
+void scheme_notify_code_gc()
+{
+  jit_prev_page = 0;
+  jit_prev_length = 0;
+}
+#endif
+
+#ifdef MZ_PRECISE_GC
+END_XFORM_SKIP;
+#endif
+
+/************************************************************************/
+/*                            finalization                              */
+/************************************************************************/
+
 typedef struct Finalization {
   MZTAG_IF_REQUIRED
   void (*f)(void *o, void *data);

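As a side note, the bucket sizes chosen by init_free_list above can be previewed with a small standalone program (not part of the patch; it assumes a 4096-byte page and the 16-byte CODE_HEADER_SIZE defined in the new code):

#include <stdio.h>

#define CODE_HEADER_SIZE 16

int main(void)
{
  long page_size = 4096;
  long last_v = page_size, v;
  int cnt = 2;

  /* Same computation as init_free_list(): fit 2 objects per page,
     then 3, then 4, ..., rounded down to CODE_HEADER_SIZE alignment. */
  while (1) {
    v = (page_size - CODE_HEADER_SIZE) / cnt;
    v = (v / CODE_HEADER_SIZE) * CODE_HEADER_SIZE;
    if (v != last_v) {
      printf("bucket size %ld\n", v);  /* 2032, 1360, 1008, ..., 16 */
      last_v = v;
      if (v == CODE_HEADER_SIZE)
        break;
    }
    cnt++;
  }
  return 0;
}
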
@@ -812,31 +1225,9 @@ unsigned long scheme_get_deeper_address(void)
   return (unsigned long)vp;
 }
 
-#ifdef DOS_MEMORY
-
-int scheme_same_pointer(void *a, void *b)
-{
-  long as, ao, bs, bo, areal, breal;
-
-  as = FP_SEG(a);
-  ao = FP_OFF(a);
-  bs = FP_SEG(b);
-  bo = FP_SEG(b);
-
-  areal = (as << 4) + ao;
-  breal = (bs << 4) + bo;
-
-  return areal == breal;
-}
-
-int scheme_diff_pointer(void *a, void *b)
-{
-  return !scheme_same_pointer(a, b);
-}
-
-#endif
-
+/************************************************************************/
+/*                               GC_dump                                */
+/************************************************************************/
 
 #ifndef MZ_PRECISE_GC
 # ifdef __cplusplus

@@ -358,6 +358,13 @@ MZ_EXTERN void *GC_malloc_uncollectable(size_t size_in_bytes);
 # endif
 #endif
 
+MZ_EXTERN void *scheme_malloc_code(long size);
+MZ_EXTERN void scheme_free_code(void *p);
+#ifndef MZ_PRECISE_GC
+MZ_EXTERN void *scheme_malloc_gcable_code(long size);
+#endif
+
+
 MZ_EXTERN void *scheme_malloc_eternal(size_t n);
 MZ_EXTERN void scheme_end_stubborn_change(void *p);
 

@@ -291,6 +291,11 @@ void *(*GC_malloc_uncollectable)(size_t size_in_bytes);
 # endif
 # endif
 #endif
+void *(*scheme_malloc_code)(long size);
+void (*scheme_free_code)(void *p);
+#ifndef MZ_PRECISE_GC
+void *(*scheme_malloc_gcable_code)(long size);
+#endif
 void *(*scheme_malloc_eternal)(size_t n);
 void (*scheme_end_stubborn_change)(void *p);
 void *(*scheme_calloc)(size_t num, size_t size);

@@ -193,6 +193,11 @@
   scheme_extension_table->GC_malloc_uncollectable = GC_malloc_uncollectable;
 # endif
 # endif
+#endif
+  scheme_extension_table->scheme_malloc_code = scheme_malloc_code;
+  scheme_extension_table->scheme_free_code = scheme_free_code;
+#ifndef MZ_PRECISE_GC
+  scheme_extension_table->scheme_malloc_gcable_code = scheme_malloc_gcable_code;
 #endif
   scheme_extension_table->scheme_malloc_eternal = scheme_malloc_eternal;
   scheme_extension_table->scheme_end_stubborn_change = scheme_end_stubborn_change;

@@ -194,6 +194,11 @@
 # endif
 # endif
 #endif
+#define scheme_malloc_code (scheme_extension_table->scheme_malloc_code)
+#define scheme_free_code (scheme_extension_table->scheme_free_code)
+#ifndef MZ_PRECISE_GC
+#define scheme_malloc_gcable_code (scheme_extension_table->scheme_malloc_gcable_code)
+#endif
 #define scheme_malloc_eternal (scheme_extension_table->scheme_malloc_eternal)
 #define scheme_end_stubborn_change (scheme_extension_table->scheme_end_stubborn_change)
 #define scheme_calloc (scheme_extension_table->scheme_calloc)

@@ -1007,6 +1007,9 @@ Scheme_Object *scheme_unclose_case_lambda(Scheme_Object *expr, int jit);
 Scheme_Object *scheme_native_stack_trace(void);
 void scheme_clean_native_symtab(void);
 void scheme_clean_cust_box_list(void);
+#ifndef MZ_PRECISE_GC
+void scheme_notify_code_gc(void);
+#endif
 
 /*========================================================================*/
 /*                             control flow                              */

@@ -253,6 +253,9 @@ static void clean_symbol_table(void)
 # ifndef MZ_PRECISE_GC
   scheme_clean_cust_box_list();
 # endif
+# ifndef MZ_PRECISE_GC
+  scheme_notify_code_gc();
+# endif
 }
 #endif
 