fix problem with deallocating JIT-generated code and then allocating new code in its place, where the relevant allocator is smart enough to unmap and then map a page that contains the object

svn: r5520
Matthew Flatt 2007-01-31 11:35:09 +00:00
parent c9e5484762
commit f0cadda10a
7 changed files with 59 additions and 21 deletions
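
The change has two halves, visible in the diffs below: the page allocators gain a real malloc_dirty_pages entry point (a cached page is zeroed on reuse only when the caller needs clean memory), and the JIT gains jit_notify_freed_code so that freeing generated code invalidates the mprotect range cached by jit_flush_code. A minimal sketch of the stale-cache failure being fixed, using hypothetical names (flush_code, notify_freed_code) rather than the commit's code:

/* The JIT remembers the last range it made executable. */
static unsigned long cached_page = 0, cached_len = 0;

static void flush_code(unsigned long page, unsigned long len)
{
  /* If the allocator unmaps a freed page and later maps a fresh page
     at the same address, this test can still succeed, so the new page
     is never re-protected and executing from it crashes. */
  if (page >= cached_page && page + len <= cached_page + cached_len)
    return;
  /* ... mprotect()/VirtualProtect() the range executable here ... */
  cached_page = page;
  cached_len = len;
}

/* The fix: freeing JIT code clears the cache, so a recycled address
   is always re-protected before new code runs from it. */
static void notify_freed_code(void)
{
  cached_page = cached_len = 0;
}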

View File

@@ -50,7 +50,7 @@ static void collapse_adjacent_pages(void)
   }
 }
 
-inline static void *find_cached_pages(size_t len, size_t alignment)
+inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok)
 {
   int i;
   void *r;
@@ -62,7 +62,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment)
     if (!alignment || !((unsigned long)r & (alignment - 1))) {
       blockfree[i].start = NULL;
       blockfree[i].len = 0;
-      if (!blockfree[i].zeroed)
+      if (!blockfree[i].zeroed && !dirty_ok)
         memset(r, 0, len);
       LOGICALLY_ALLOCATING_PAGES(len);
       return r;
@@ -78,7 +78,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment)
     if (!alignment || !((unsigned long)r & (alignment - 1))) {
       blockfree[i].start += len;
       blockfree[i].len -= len;
-      if (!blockfree[i].zeroed)
+      if (!blockfree[i].zeroed && !dirty_ok)
         memset(r, 0, len);
       LOGICALLY_ALLOCATING_PAGES(len);
       return r;
@@ -88,7 +88,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment)
     r = blockfree[i].start + (blockfree[i].len - len);
     if (!((unsigned long)r & (alignment - 1))) {
       blockfree[i].len -= len;
-      if (!blockfree[i].zeroed)
+      if (!blockfree[i].zeroed && !dirty_ok)
         memset(r, 0, len);
       LOGICALLY_ALLOCATING_PAGES(len);
       return r;
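
As a standalone illustration of the new dirty_ok convention above, a hedged sketch (take_block and free_block are hypothetical names, not from this file): a cached block is scrubbed on reuse only when it is not already known to be zeroed and the caller did not ask for a dirty page.

#include <string.h>

typedef struct {
  void *start;
  size_t len;
  int zeroed;
} free_block;

/* Hand out a cached block; zero it only if the caller needs clean
   memory (dirty_ok == 0) and the block was not freed pre-zeroed. */
static void *take_block(free_block *b, size_t len, int dirty_ok)
{
  void *r = b->start;
  b->start = NULL;
  b->len = 0;
  if (!b->zeroed && !dirty_ok)
    memset(r, 0, len);
  return r;
}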

View File

@@ -171,8 +171,6 @@ inline static void free_used_pages(size_t len)
 #include "protect_range.c"
 
-#define malloc_dirty_pages(size,align) malloc_pages(size,align)
-
 /*****************************************************************************/
 /* Memory Tracing, Part 1                                                    */
 /*****************************************************************************/
 
@@ -369,6 +367,8 @@ static size_t round_to_apage_size(size_t sizeb)
 static unsigned long custodian_single_time_limit(int set);
 inline static int thread_get_owner(void *p);
 
+static int atomic_counter = 0;
+
 /* the core allocation functions */
 static void *allocate_big(size_t sizeb, int type)
 {
@@ -2078,6 +2078,7 @@ void GC_mark(const void *const_p)
   } else {
     /* Allocate and prep the page */
     work = (struct mpage *)malloc_dirty_pages(APAGE_SIZE, APAGE_SIZE);
+    memset(work, 0, sizeof(struct mpage));
     work->generation = 1;
     work->page_type = type;
     work->size = work->previous_size = HEADER_SIZEB;
@@ -2490,6 +2491,7 @@ struct mpage *allocate_compact_target(struct mpage *work)
   struct mpage *npage;
 
   npage = malloc_dirty_pages(APAGE_SIZE, APAGE_SIZE);
+  memset(npage, 0, sizeof(struct mpage));
   npage->previous_size = npage->size = HEADER_SIZEB;
   npage->generation = 1;
   npage->back_pointers = 0;
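
The GC hunks above pair malloc_dirty_pages with a memset of just the mpage header: the page body is written before it is ever read, so only the metadata must start zeroed. A hedged sketch of that pattern (struct and helper names here are hypothetical):

#include <stdlib.h>
#include <string.h>

struct page_header { int generation; int page_type; size_t size; };

/* Stand-in for an allocator that may return a recycled, dirty page. */
static void *get_dirty_page(size_t len) { return malloc(len); }

static struct page_header *prep_page(size_t page_size)
{
  struct page_header *work = get_dirty_page(page_size);
  /* The page may hold stale data; clear only the header... */
  memset(work, 0, sizeof(struct page_header));
  /* ...then set the fields the collector actually reads. */
  work->generation = 1;
  work->size = sizeof(struct page_header);
  return work;
}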

View File

@@ -35,7 +35,7 @@ static int page_size; /* OS page size */
 int fd, fd_created;
 #endif
 
-inline static void *find_cached_pages(size_t len, size_t alignment);
+inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);
 static void free_actual_pages(void *p, size_t len, int zeroed);
 
 /* Instead of immediately freeing pages with munmap---only to mmap
@@ -48,7 +48,7 @@ static void free_actual_pages(void *p, size_t len, int zeroed);
    mechanism, but we do a bit of work to collapse adjacent pages in
    the cache. */
 
-static void *malloc_pages(size_t len, size_t alignment)
+static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
 {
   void *r;
   size_t extra = 0;
@@ -70,7 +70,7 @@ static void *malloc_pages(size_t len, size_t alignment)
     len += page_size - (len & (page_size - 1));
 
   /* Something from the cache, perhaps? */
-  r = find_cached_pages(len, alignment);
+  r = find_cached_pages(len, alignment, dirty_ok);
   if (r)
     return r;
 
@@ -116,7 +116,15 @@ static void *malloc_pages(size_t len, size_t alignment)
   return r;
 }
 
-#define malloc_dirty_pages(size,align) malloc_pages(size,align)
+static void *malloc_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 0);
+}
+
+static void *malloc_dirty_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 1);
+}
 
 static void system_free_pages(void *p, size_t len)
 {

View File

@@ -59,7 +59,7 @@ int designate_modified(void *p);
 #endif
 
 /* Forward declarations: */
-inline static void *find_cached_pages(size_t len, size_t alignment);
+inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);
 static void free_actual_pages(void *p, size_t len, int zeroed);
 
 /* the structure of an exception msg and its reply */
@@ -93,7 +93,7 @@ static mach_port_t task_self = 0;
 static mach_port_t exc_port = 0;
 
 /* the VM subsystem as defined by the GC files */
-static void *malloc_pages(size_t len, size_t alignment)
+static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
 {
   kern_return_t retval;
   size_t extra = 0;
@@ -107,7 +107,7 @@ static void *malloc_pages(size_t len, size_t alignment)
   if(len & (page_size - 1))
     len += page_size - (len & (page_size - 1));
 
-  r = find_cached_pages(len, alignment);
+  r = find_cached_pages(len, alignment, dirty_ok);
   if (r)
     return r;
@@ -157,6 +157,16 @@ static void *malloc_pages(size_t len, size_t alignment)
   return r;
 }
 
+static void *malloc_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 0);
+}
+
+static void *malloc_dirty_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 1);
+}
+
 static void system_free_pages(void *p, size_t len)
 {
   kern_return_t retval;

View File

@@ -6384,6 +6384,7 @@ void scheme_clean_native_symtab(void)
 {
 #ifndef MZ_PRECISE_GC
   clear_symbols_for_collected();
+  jit_notify_freed_code();
 #endif
 }
 
@@ -6398,6 +6399,7 @@ static void release_native_code(void *fnlized, void *p)
   add_symbol((unsigned long)p, (unsigned long)p + SCHEME_INT_VAL(len), NULL, 0);
   /* Free memory: */
   free(p);
+  jit_notify_freed_code();
 }
 #endif
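
Putting the two hunks above in sequence, a hedged sketch of the intended lifecycle (emit_code and the stubs are hypothetical; jit_notify_freed_code is the function this commit adds):

#include <stdlib.h>

/* Hypothetical stubs standing in for the real JIT entry points. */
static void jit_flush_code(void *dest, void *end) { (void)dest; (void)end; }
static void jit_notify_freed_code(void) { }
static void emit_code(void *p, size_t len) { (void)p; (void)len; }

int main(void)
{
  size_t len = 4096;
  void *code = malloc(len);                  /* stands in for the code allocator */
  emit_code(code, len);
  jit_flush_code(code, (char *)code + len);  /* protect the range, cache it */

  free(code);               /* the same page may be mapped again here... */
  jit_notify_freed_code();  /* ...so drop the cached executable range */
  return 0;
}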

View File

@@ -42,6 +42,18 @@
 # include <windows.h>
 #endif
 
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+static unsigned long jit_prev_page = 0, jit_prev_length = 0;
+#endif
+
+static void
+jit_notify_freed_code(void)
+{
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+  jit_prev_page = jit_prev_length = 0;
+#endif
+}
+
 static void
 jit_flush_code(void *dest, void *end)
 {
@@ -55,7 +67,6 @@ jit_flush_code(void *dest, void *end)
      and more common (Fedora, for example), so we implement our
      jit_flush_code as an mprotect. */
 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
-  static unsigned long prev_page = 0, prev_length = 0;
   unsigned long page, length;
 # ifdef PAGESIZE
   const long page_size = PAGESIZE;
@@ -77,7 +88,7 @@ jit_flush_code(void *dest, void *end)
 
   /* Simple-minded attempt at optimizing the common case where a single
      chunk of memory is used to compile multiple functions. */
-  if (page >= prev_page && page + length <= prev_page + prev_length)
+  if (page >= jit_prev_page && page + length <= jit_prev_page + jit_prev_length)
     return;
 
 # ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
@@ -91,18 +102,18 @@ jit_flush_code(void *dest, void *end)
 
   /* See if we can extend the previously mprotect'ed memory area towards
      higher addresses: the starting address remains the same as before. */
-  if (page >= prev_page && page <= prev_page + prev_length)
-    prev_length = page + length - prev_page;
+  if (page >= jit_prev_page && page <= jit_prev_page + jit_prev_length)
+    jit_prev_length = page + length - jit_prev_page;
 
   /* See if we can extend the previously mprotect'ed memory area towards
      lower addresses: the highest address remains the same as before. */
-  else if (page < prev_page && page + length >= prev_page
-           && page + length <= prev_page + prev_length)
-    prev_length += prev_page - page, prev_page = page;
+  else if (page < jit_prev_page && page + length >= jit_prev_page
+           && page + length <= jit_prev_page + jit_prev_length)
+    jit_prev_length += jit_prev_page - page, jit_prev_page = page;
 
   /* Nothing to do, replace the area. */
   else
-    prev_page = page, prev_length = length;
+    jit_prev_page = page, jit_prev_length = length;
 #endif
 }
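
The renamed bookkeeping above keeps one cached range and tries to extend it before replacing it. A small self-checking sketch of the same three cases, with hypothetical names and numbers:

#include <assert.h>

static unsigned long prevp = 0, prevl = 0;  /* cached range [prevp, prevp+prevl) */

static void remember(unsigned long page, unsigned long length)
{
  if (page >= prevp && page + length <= prevp + prevl)
    return;                               /* already covered */
  if (page >= prevp && page <= prevp + prevl)
    prevl = page + length - prevp;        /* extend toward higher addresses */
  else if (page < prevp && page + length >= prevp
           && page + length <= prevp + prevl)
    prevl += prevp - page, prevp = page;  /* extend toward lower addresses */
  else
    prevp = page, prevl = length;         /* disjoint: replace the range */
}

int main(void)
{
  remember(0x1000, 0x2000);  /* cache [0x1000, 0x3000) */
  remember(0x2000, 0x2000);  /* overlaps the top: extend to [0x1000, 0x4000) */
  assert(prevp == 0x1000 && prevl == 0x3000);
  remember(0x8000, 0x1000);  /* disjoint: replace with [0x8000, 0x9000) */
  assert(prevp == 0x8000 && prevl == 0x1000);
  return 0;
}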

View File

@@ -34,6 +34,11 @@
 #ifndef __lightning_funcs_h
 #define __lightning_funcs_h
 
+static void
+jit_notify_freed_code(void)
+{
+}
+
 #if !defined(__GNUC__) && !defined(__GNUG__)
 #error Go get GNU C, I do not know how to flush the cache
 #error with this compiler.