fix problem with deallocating JIT-generated code and then allocating new code in its place, where the relevant allocator is smart enough to unmap and then remap a page that contains the object
svn: r5520
parent c9e5484762
commit f0cadda10a
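The gist of the fix, as a standalone sketch (all identifiers and addresses here are illustrative stand-ins, not the actual MzScheme names): the JIT's flush routine remembers the last address range it made executable, so that compiling many functions into one chunk costs a single mprotect. Once freed code pages can be unmapped and remapped at the same address, that remembered range must be invalidated whenever code is freed, which is what the new jit_notify_freed_code hook in the diff below does.

/* Illustrative sketch only; see the diff below for the real change. */
#include <stdio.h>

static unsigned long prev_page = 0, prev_length = 0;

/* 1 if [page, page+length) is inside the last-flushed range, so the
   mprotect/cache flush could be skipped. */
static int flush_already_done(unsigned long page, unsigned long length)
{
  return (page >= prev_page) && (page + length <= prev_page + prev_length);
}

static void record_flush(unsigned long page, unsigned long length)
{
  prev_page = page;
  prev_length = length;
}

/* The fix: forget the cached range when JIT code is deallocated, since
   the allocator may unmap the page and reuse the same addresses. */
static void notify_freed_code(void)
{
  prev_page = prev_length = 0;
}

int main(void)
{
  record_flush(0x10000, 0x2000);
  printf("skip flush? %d\n", flush_already_done(0x10000, 0x1000)); /* 1 */
  notify_freed_code();  /* code freed; page may be recycled */
  printf("skip flush? %d\n", flush_already_done(0x10000, 0x1000)); /* 0 */
  return 0;
}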
@@ -50,7 +50,7 @@ static void collapse_adjacent_pages(void)
   }
 }
 
-inline static void *find_cached_pages(size_t len, size_t alignment)
+inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok)
 {
   int i;
   void *r;
@@ -62,7 +62,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment)
       if (!alignment || !((unsigned long)r & (alignment - 1))) {
         blockfree[i].start = NULL;
         blockfree[i].len = 0;
-        if (!blockfree[i].zeroed)
+        if (!blockfree[i].zeroed && !dirty_ok)
           memset(r, 0, len);
         LOGICALLY_ALLOCATING_PAGES(len);
         return r;
@@ -78,7 +78,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment)
       if (!alignment || !((unsigned long)r & (alignment - 1))) {
         blockfree[i].start += len;
         blockfree[i].len -= len;
-        if (!blockfree[i].zeroed)
+        if (!blockfree[i].zeroed && !dirty_ok)
           memset(r, 0, len);
         LOGICALLY_ALLOCATING_PAGES(len);
         return r;
@@ -88,7 +88,7 @@ inline static void *find_cached_pages(size_t len, size_t alignment)
       r = blockfree[i].start + (blockfree[i].len - len);
       if (!((unsigned long)r & (alignment - 1))) {
         blockfree[i].len -= len;
-        if (!blockfree[i].zeroed)
+        if (!blockfree[i].zeroed && !dirty_ok)
           memset(r, 0, len);
         LOGICALLY_ALLOCATING_PAGES(len);
         return r;
@@ -171,8 +171,6 @@ inline static void free_used_pages(size_t len)
 
 #include "protect_range.c"
 
-#define malloc_dirty_pages(size,align) malloc_pages(size,align)
-
 /*****************************************************************************/
 /* Memory Tracing, Part 1 */
 /*****************************************************************************/
@@ -369,6 +367,8 @@ static size_t round_to_apage_size(size_t sizeb)
 static unsigned long custodian_single_time_limit(int set);
 inline static int thread_get_owner(void *p);
 
+static int atomic_counter = 0;
+
 /* the core allocation functions */
 static void *allocate_big(size_t sizeb, int type)
 {
@@ -2078,6 +2078,7 @@ void GC_mark(const void *const_p)
   } else {
     /* Allocate and prep the page */
     work = (struct mpage *)malloc_dirty_pages(APAGE_SIZE, APAGE_SIZE);
+    memset(work, 0, sizeof(struct mpage));
     work->generation = 1;
     work->page_type = type;
     work->size = work->previous_size = HEADER_SIZEB;
@@ -2490,6 +2491,7 @@ struct mpage *allocate_compact_target(struct mpage *work)
   struct mpage *npage;
 
   npage = malloc_dirty_pages(APAGE_SIZE, APAGE_SIZE);
+  memset(npage, 0, sizeof(struct mpage));
   npage->previous_size = npage->size = HEADER_SIZEB;
   npage->generation = 1;
   npage->back_pointers = 0;
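The two hunks above adopt the caller-side contract for dirty pages: malloc_dirty_pages may return memory with stale contents, so the caller zeroes exactly what it needs zeroed, here just the mpage header rather than the whole page. A hedged sketch of that pattern (hypothetical helper; struct fields abbreviated, APAGE_SIZE value assumed):

#include <stddef.h>
#include <string.h>

#define APAGE_SIZE 16384               /* assumed size for illustration */

struct mpage {                         /* abbreviated from the diff */
  size_t size, previous_size;
  int generation, page_type, back_pointers;
};

void *malloc_dirty_pages(size_t len, size_t alignment);  /* from the diff */

static struct mpage *alloc_gc_page(int type, size_t header_sizeb)
{
  struct mpage *work = (struct mpage *)malloc_dirty_pages(APAGE_SIZE, APAGE_SIZE);
  memset(work, 0, sizeof(struct mpage));  /* zero the header, not the page */
  work->generation = 1;
  work->page_type = type;
  work->size = work->previous_size = header_sizeb;
  return work;
}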
@@ -35,7 +35,7 @@ static int page_size; /* OS page size */
 int fd, fd_created;
 #endif
 
-inline static void *find_cached_pages(size_t len, size_t alignment);
+inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);
 static void free_actual_pages(void *p, size_t len, int zeroed);
 
 /* Instead of immediately freeing pages with munmap---only to mmap
@@ -48,7 +48,7 @@ static void free_actual_pages(void *p, size_t len, int zeroed);
    mechanism, but we do a bit of work to collapse adjacent pages in
    the cache. */
 
-static void *malloc_pages(size_t len, size_t alignment)
+static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
 {
   void *r;
   size_t extra = 0;
@@ -70,7 +70,7 @@ static void *malloc_pages(size_t len, size_t alignment)
     len += page_size - (len & (page_size - 1));
 
   /* Something from the cache, perhaps? */
-  r = find_cached_pages(len, alignment);
+  r = find_cached_pages(len, alignment, dirty_ok);
   if (r)
     return r;
 
@@ -116,7 +116,15 @@ static void *malloc_pages(size_t len, size_t alignment)
   return r;
 }
 
-#define malloc_dirty_pages(size,align) malloc_pages(size,align)
+static void *malloc_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 0);
+}
+
+static void *malloc_dirty_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 1);
+}
 
 static void system_free_pages(void *p, size_t len)
 {
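A compact model of the cache policy these hunks implement (illustrative; the real find_cached_pages scans a blockfree[] array and also checks alignment): each cached block remembers whether it is still zero-filled, and only a request that cannot tolerate dirty memory pays for a memset. The new malloc_pages/malloc_dirty_pages pair then simply threads 0 or 1 through as dirty_ok.

#include <stddef.h>
#include <string.h>

typedef struct {
  void *start;
  size_t len;
  int zeroed;    /* still zero-filled since it was mapped or cleared? */
} cached_block;

/* Take len bytes from a cached block, zeroing only when required. */
static void *take_cached(cached_block *b, size_t len, int dirty_ok)
{
  void *r;
  if (!b->start || b->len < len)
    return NULL;
  r = b->start;
  b->start = (char *)b->start + len;
  b->len -= len;
  if (!b->zeroed && !dirty_ok)   /* the check added by this commit */
    memset(r, 0, len);
  return r;
}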
@@ -59,7 +59,7 @@ int designate_modified(void *p);
 #endif
 
 /* Forward declarations: */
-inline static void *find_cached_pages(size_t len, size_t alignment);
+inline static void *find_cached_pages(size_t len, size_t alignment, int dirty_ok);
 static void free_actual_pages(void *p, size_t len, int zeroed);
 
 /* the structure of an exception msg and its reply */
@@ -93,7 +93,7 @@ static mach_port_t task_self = 0;
 static mach_port_t exc_port = 0;
 
 /* the VM subsystem as defined by the GC files */
-static void *malloc_pages(size_t len, size_t alignment)
+static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
 {
   kern_return_t retval;
   size_t extra = 0;
@@ -107,7 +107,7 @@ static void *malloc_pages(size_t len, size_t alignment)
   if(len & (page_size - 1))
     len += page_size - (len & (page_size - 1));
 
-  r = find_cached_pages(len, alignment);
+  r = find_cached_pages(len, alignment, dirty_ok);
   if (r)
     return r;
 
@@ -157,6 +157,16 @@ static void *malloc_pages(size_t len, size_t alignment)
   return r;
 }
 
+static void *malloc_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 0);
+}
+
+static void *malloc_dirty_pages(size_t len, size_t alignment)
+{
+  return do_malloc_pages(len, alignment, 1);
+}
+
 static void system_free_pages(void *p, size_t len)
 {
   kern_return_t retval;
@@ -6384,6 +6384,7 @@ void scheme_clean_native_symtab(void)
 {
 #ifndef MZ_PRECISE_GC
   clear_symbols_for_collected();
+  jit_notify_freed_code();
 #endif
 }
 
@@ -6398,6 +6399,7 @@ static void release_native_code(void *fnlized, void *p)
   add_symbol((unsigned long)p, (unsigned long)p + SCHEME_INT_VAL(len), NULL, 0);
   /* Free memory: */
   free(p);
+  jit_notify_freed_code();
 }
 #endif
 
@@ -42,6 +42,18 @@
 # include <windows.h>
 #endif
 
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+static unsigned long jit_prev_page = 0, jit_prev_length = 0;
+#endif
+
+static void
+jit_notify_freed_code(void)
+{
+#if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
+  jit_prev_page = jit_prev_length = 0;
+#endif
+}
+
 static void
 jit_flush_code(void *dest, void *end)
 {
@@ -55,7 +67,6 @@ jit_flush_code(void *dest, void *end)
    and more common (Fedora, for example), so we implement our
    jit_flush_code as an mprotect. */
 #if defined(MZ_JIT_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
-  static unsigned long prev_page = 0, prev_length = 0;
   unsigned long page, length;
 # ifdef PAGESIZE
   const long page_size = PAGESIZE;
@@ -77,7 +88,7 @@ jit_flush_code(void *dest, void *end)
 
   /* Simple-minded attempt at optimizing the common case where a single
      chunk of memory is used to compile multiple functions. */
-  if (page >= prev_page && page + length <= prev_page + prev_length)
+  if (page >= jit_prev_page && page + length <= jit_prev_page + jit_prev_length)
     return;
 
 # ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
@@ -91,18 +102,18 @@ jit_flush_code(void *dest, void *end)
 
   /* See if we can extend the previously mprotect'ed memory area towards
      higher addresses: the starting address remains the same as before. */
-  if (page >= prev_page && page <= prev_page + prev_length)
-    prev_length = page + length - prev_page;
+  if (page >= jit_prev_page && page <= jit_prev_page + jit_prev_length)
+    jit_prev_length = page + length - jit_prev_page;
 
   /* See if we can extend the previously mprotect'ed memory area towards
      lower addresses: the highest address remains the same as before. */
-  else if (page < prev_page && page + length >= prev_page
-           && page + length <= prev_page + prev_length)
-    prev_length += prev_page - page, prev_page = page;
+  else if (page < jit_prev_page && page + length >= jit_prev_page
+           && page + length <= jit_prev_page + jit_prev_length)
+    jit_prev_length += jit_prev_page - page, jit_prev_page = page;
 
   /* Nothing to do, replace the area. */
   else
-    prev_page = page, prev_length = length;
+    jit_prev_page = page, jit_prev_length = length;
 #endif
 }
 
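The interval arithmetic in the hunk above, pulled out as a standalone function for clarity (hypothetical names; in jit_flush_code the fully-covered case returns before this point):

/* Grow a single cached [page, page+length) interval to cover a newly
   flushed range, mirroring the three cases in the diff above. */
static unsigned long cache_page = 0, cache_length = 0;

static void record_flushed_range(unsigned long page, unsigned long length)
{
  if (page >= cache_page && page + length <= cache_page + cache_length) {
    /* Already covered: nothing to record. */
  } else if (page >= cache_page && page <= cache_page + cache_length) {
    /* Overlaps or abuts the top: extend upward, start unchanged. */
    cache_length = page + length - cache_page;
  } else if (page < cache_page && page + length >= cache_page
             && page + length <= cache_page + cache_length) {
    /* Overlaps or abuts the bottom: extend downward, end unchanged. */
    cache_length += cache_page - page;
    cache_page = page;
  } else {
    /* Disjoint: replace the cached interval. */
    cache_page = page;
    cache_length = length;
  }
}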
@@ -34,6 +34,11 @@
 #ifndef __lightning_funcs_h
 #define __lightning_funcs_h
 
+static void
+jit_notify_freed_code(void)
+{
+}
+
 #if !defined(__GNUC__) && !defined(__GNUG__)
 #error Go get GNU C, I do not know how to flush the cache
 #error with this compiler.