SenoraGC: support allocation of executable memory; tune for performance
Allocation of executable memory is intended to make SELinux happier by mmapping with PROT_EXEC instead of using mprotect() to allow execution after the fact. Performance improvements bring SGC within 30% of the Boehm GC on `racketcgc -cl racket`, which makes SGC an even more plausible substitute.
parent 676dfebce6
commit 2916fc34cc
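As a rough illustration of the distinction the commit message draws (this is not code from the commit, and the function names are illustrative), the sketch below contrasts mapping pages with PROT_EXEC up front against retrofitting execute permission via mprotect(), which is the call a strict SELinux policy is likely to deny:

/* Minimal standalone sketch, assuming a POSIX system with mmap()/mprotect(). */
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON   /* some systems only define MAP_ANON */
#endif

/* Old style: map read/write, then flip on execute permission afterward.
   The mprotect() step is what a restrictive SELinux policy can reject. */
static void *alloc_code_via_mprotect(size_t len)
{
  void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return NULL;
  if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
    munmap(p, len);
    return NULL;
  }
  return p;
}

/* New style: request PROT_EXEC in the initial mmap(), as this commit does
   for SGC's executable sectors. */
static void *alloc_code_via_mmap_exec(size_t len)
{
  void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED) ? NULL : p;
}

int main(void)
{
  void *a = alloc_code_via_mprotect(4096);
  void *b = alloc_code_via_mmap_exec(4096);
  printf("mprotect-style: %p, mmap-exec-style: %p\n", a, b);
  if (a) munmap(a, 4096);
  if (b) munmap(b, 4096);
  return 0;
}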
@ -5,8 +5,10 @@
|
|||
static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, intptr_t *size_diff)
|
||||
Requires (defined earlier):
|
||||
my_qsort --- possibly from my_qsort.c
|
||||
static void os_vm_free_pages(void *p, size_t len);
|
||||
static void *os_vm_alloc_pages(size_t len);
|
||||
static void os_free_pages(void *p, size_t len);
|
||||
static void *os_alloc_pages(size_t len);
|
||||
static void *ofm_malloc_zero(size_t len);
|
||||
APAGE_SIZE (for cache heuristic)
|
||||
*/
|
||||
|
||||
/* Controls how often freed pages are actually returned to OS: */
|
||||
|
@ -18,10 +20,18 @@
|
|||
/* Controls how many extra pages are requested from OS at a time: */
|
||||
#define CACHE_SEED_PAGES 16
|
||||
|
||||
typedef struct AllocCacheBlock {
|
||||
char *start;
|
||||
intptr_t len;
|
||||
short age;
|
||||
short zeroed;
|
||||
} AllocCacheBlock;
|
||||
|
||||
static AllocCacheBlock *alloc_cache_create() {
|
||||
return ofm_malloc_zero(sizeof(AllocCacheBlock) * BLOCKFREE_CACHE_SIZE);
|
||||
}
|
||||
|
||||
#ifndef NO_ALLOC_CACHE_FREE
|
||||
static intptr_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree);
|
||||
static intptr_t alloc_cache_free(AllocCacheBlock *ac) {
|
||||
if (ac) {
|
||||
|
@ -31,6 +41,7 @@ static intptr_t alloc_cache_free(AllocCacheBlock *ac) {
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int alloc_cache_block_compare(const void *a, const void *b)
|
||||
{
|
||||
|
@ -172,6 +183,7 @@ static intptr_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree)
|
|||
return freed;
|
||||
}
|
||||
|
||||
#ifndef NO_ALLOC_CACHE_FREE
|
||||
static intptr_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree)
|
||||
{
|
||||
int i;
|
||||
|
@ -188,6 +200,7 @@ static intptr_t alloc_cache_free_all_pages(AllocCacheBlock *blockfree)
|
|||
}
|
||||
return freed;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Instead of immediately freeing pages with munmap---only to mmap
|
||||
them again---we cache BLOCKFREE_CACHE_SIZE freed pages. A page is
|
||||
|
|
|
@ -11,11 +11,12 @@ static void os_protect_pages(void *p, size_t len, int writable);
|
|||
#define BC_MAX_BLOCK_SIZE (1 << 24) /* 16 MB */
|
||||
|
||||
struct block_desc;
|
||||
static AllocCacheBlock *alloc_cache_create();
|
||||
static ssize_t alloc_cache_free(AllocCacheBlock *);
|
||||
static ssize_t alloc_cache_free_page(AllocCacheBlock *blockfree, char *p, size_t len, int dirty, int originated_here);
|
||||
static ssize_t alloc_cache_flush_freed_pages(AllocCacheBlock *blockfree);
|
||||
static void *alloc_cache_alloc_page(AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
|
||||
struct AllocCacheBlock;
|
||||
static struct AllocCacheBlock *alloc_cache_create();
|
||||
static ssize_t alloc_cache_free(struct AllocCacheBlock *);
|
||||
static ssize_t alloc_cache_free_page(struct AllocCacheBlock *blockfree, char *p, size_t len, int dirty, int originated_here);
|
||||
static ssize_t alloc_cache_flush_freed_pages(struct AllocCacheBlock *blockfree);
|
||||
static void *alloc_cache_alloc_page(struct AllocCacheBlock *blockfree, size_t len, size_t alignment, int dirty_ok, ssize_t *size_diff);
|
||||
|
||||
static Page_Range *page_range_create();
|
||||
static void page_range_free(Page_Range *pr);
|
||||
|
@ -50,7 +51,7 @@ typedef struct block_group {
|
|||
typedef struct BlockCache {
|
||||
block_group atomic;
|
||||
block_group non_atomic;
|
||||
AllocCacheBlock *bigBlockCache;
|
||||
struct AllocCacheBlock *bigBlockCache;
|
||||
Page_Range *page_range;
|
||||
MMU *mmu;
|
||||
} BlockCache;
|
||||
|
|
|
@ -106,13 +106,6 @@ typedef struct Page_Range {
|
|||
uintptr_t range_alloc_used;
|
||||
} Page_Range;
|
||||
|
||||
typedef struct {
|
||||
char *start;
|
||||
intptr_t len;
|
||||
short age;
|
||||
short zeroed;
|
||||
} AllocCacheBlock;
|
||||
|
||||
#ifdef MZ_USE_PLACES
|
||||
typedef struct NewGCMasterInfo {
|
||||
uintptr_t size;
|
||||
|
|
|
@ -121,17 +121,19 @@
|
|||
/* Instead of calling malloc() to get low-level memory, use
|
||||
sbrk() directly. (Unix) */
|
||||
|
||||
#define GET_MEM_VIA_MMAP SGC_STD_DEBUGGING_UNIX
|
||||
#define GET_MEM_VIA_MMAP (SGC_STD_DEBUGGING_UNIX || defined(HAVE_MMAP_MPROTECT))
|
||||
/* Instead of calling malloc() to get low-level memory, use
|
||||
mmap() directly. (Unix) */
|
||||
|
||||
#define GET_MEM_VIA_VIRTUAL_ALLOC SGC_STD_DEBUGGING_WINDOWS
|
||||
#define GET_MEM_VIA_VIRTUAL_ALLOC (SGC_STD_DEBUGGING_WINDOWS || defined(WIN32))
|
||||
/* Instead of calling malloc() to get low-level memory, use
|
||||
VirtualAlloc() directly. (Win32) */
|
||||
|
||||
#define RELEASE_UNUSED_SECTORS 1
|
||||
/* Instead of managing a list of unused sectors, they are
|
||||
given back to the OS. This only works with mmap(). */
|
||||
given back to the OS. This only works with mmap(), and
|
||||
it's required for an allocator that distinguishes executable
|
||||
and non-executable sectors. */
|
||||
|
||||
#define DISTINGUISH_FREE_FROM_UNMARKED 0
|
||||
/* Don't let conservatism resurrect a previously-collected block */
|
||||
|
@ -314,7 +316,7 @@
|
|||
Since it should be a power of 2, LOG_SECTOR_SEGMENT_SIZE is
|
||||
specified directly. A larger block size speeds up GC, but wastes
|
||||
more unallocated bytes in same-size buckets. */
|
||||
#define LOG_SECTOR_SEGMENT_SIZE 12
|
||||
#define LOG_SECTOR_SEGMENT_SIZE 14
|
||||
#define SECTOR_SEGMENT_SIZE (1 << LOG_SECTOR_SEGMENT_SIZE)
|
||||
#define SECTOR_SEGMENT_MASK (~(SECTOR_SEGMENT_SIZE-1))
|
||||
|
||||
|
@ -668,7 +670,7 @@ static void add_freepage(SectorFreepage *naya)
|
|||
#define EACH_TABLE_COUNT (1 << (LOG_SECTOR_SEGMENT_SIZE - LOG_PTR_SIZE))
|
||||
|
||||
typedef struct GC_Set {
|
||||
short atomic, uncollectable;
|
||||
short atomic, uncollectable, code;
|
||||
#if ALLOW_SET_LOCKING
|
||||
short locked;
|
||||
#endif
|
||||
|
@ -709,8 +711,10 @@ static BlockOfMemory *common[2 * NUM_COMMON_SIZE]; /* second half is `ends' arra
|
|||
static BlockOfMemory *atomic_common[2 * NUM_COMMON_SIZE];
|
||||
static BlockOfMemory *uncollectable_common[2 * NUM_COMMON_SIZE];
|
||||
static BlockOfMemory *uncollectable_atomic_common[2 * NUM_COMMON_SIZE];
|
||||
static BlockOfMemory *code_common[2 * NUM_COMMON_SIZE];
|
||||
static MemoryChunk *others, *atomic_others;
|
||||
static MemoryChunk *uncollectable_others, *uncollectable_atomic_others;
|
||||
static MemoryChunk *code_others;
|
||||
|
||||
static int *common_positionses[NUM_COMMON_SIZE];
|
||||
|
||||
|
@ -721,6 +725,7 @@ static MemoryChunk *sys_malloc_others;
|
|||
|
||||
#define do_malloc_ATOMIC 0x1
|
||||
#define do_malloc_UNCOLLECTABLE 0x2
|
||||
#define do_malloc_EXECUTABLE 0x4
|
||||
#if NO_ATOMIC
|
||||
# define do_malloc_ATOMIC_UNLESS_DISABLED 0
|
||||
#else
|
||||
|
@ -848,7 +853,7 @@ static SectorPage ***sector_pagetablesss[1 << 16];
|
|||
# define DECL_SECTOR_PAGETABLES SectorPage **sector_pagetables;
|
||||
# define GET_SECTOR_PAGETABLES(p) sector_pagetables = create_sector_pagetables(p)
|
||||
# define FIND_SECTOR_PAGETABLES(p) sector_pagetables = get_sector_pagetables(p)
|
||||
static void *malloc_plain_sector(int count);
|
||||
static void *malloc_plain_sector(int count, int executable);
|
||||
inline static SectorPage **create_sector_pagetables(uintptr_t p) {
|
||||
uintptr_t pos;
|
||||
SectorPage ***sector_pagetabless, **sector_pagetables;
|
||||
|
@ -856,7 +861,7 @@ inline static SectorPage **create_sector_pagetables(uintptr_t p) {
|
|||
sector_pagetabless = sector_pagetablesss[pos];
|
||||
if (!sector_pagetabless) {
|
||||
int c = (sizeof(SectorPage **) << 16) >> LOG_SECTOR_SEGMENT_SIZE;
|
||||
sector_pagetabless = (SectorPage ***)malloc_plain_sector(c);
|
||||
sector_pagetabless = (SectorPage ***)malloc_plain_sector(c, 0);
|
||||
memset(sector_pagetabless, 0, c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
sector_pagetablesss[pos] = sector_pagetabless;
|
||||
sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
|
@ -868,7 +873,7 @@ inline static SectorPage **create_sector_pagetables(uintptr_t p) {
|
|||
if (c < 0)
|
||||
c = 0;
|
||||
c = 1 << c;
|
||||
sector_pagetables = (SectorPage **)malloc_plain_sector(c);
|
||||
sector_pagetables = (SectorPage **)malloc_plain_sector(c, 0);
|
||||
memset(sector_pagetables, 0, c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
sector_pagetabless[pos] = sector_pagetables;
|
||||
|
@ -913,7 +918,7 @@ static SectorPage **sector_pagetables;
|
|||
*/
|
||||
|
||||
#if GET_MEM_VIA_SBRK
|
||||
static void *platform_plain_sector(int count)
|
||||
static void *platform_plain_sector(int count, int executable)
|
||||
{
|
||||
caddr_t cur_brk = (caddr_t)sbrk(0);
|
||||
intptr_t lsbs = (uintptr_t)cur_brk & TABLE_LO_MASK;
|
||||
|
@ -933,28 +938,96 @@ static void *platform_plain_sector(int count)
|
|||
}
|
||||
#endif
|
||||
#if GET_MEM_VIA_MMAP
|
||||
static void *platform_plain_sector(int count)
|
||||
static void *mmap_sector(int count, int executable)
|
||||
{
|
||||
uintptr_t pre_extra;
|
||||
void *p;
|
||||
#ifdef MAP_ANON
|
||||
return mmap(NULL, count << LOG_SECTOR_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
|
||||
int fd = -1;
|
||||
int flags = MAP_ANON;
|
||||
#else
|
||||
static int fd;
|
||||
int flags = 0;
|
||||
|
||||
if (!fd) {
|
||||
if (!fd)
|
||||
fd = open("/dev/zero", O_RDWR);
|
||||
}
|
||||
|
||||
return mmap(0, count << LOG_SECTOR_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
|
||||
#endif
|
||||
|
||||
p = mmap(NULL, (count + 1) << LOG_SECTOR_SEGMENT_SIZE,
|
||||
PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0),
|
||||
MAP_PRIVATE | flags, fd, 0);
|
||||
|
||||
pre_extra = (uintptr_t)p & (SECTOR_SEGMENT_SIZE - 1);
|
||||
if (pre_extra)
|
||||
pre_extra = SECTOR_SEGMENT_SIZE - pre_extra;
|
||||
if (pre_extra)
|
||||
munmap(p, pre_extra);
|
||||
if (pre_extra < SECTOR_SEGMENT_SIZE)
|
||||
munmap((char *)p + pre_extra + (count << LOG_SECTOR_SEGMENT_SIZE),
|
||||
SECTOR_SEGMENT_SIZE - pre_extra);
|
||||
|
||||
return (char *)p + pre_extra;
|
||||
}
|
||||
|
||||
static void free_plain_sector(void *p, int count)
|
||||
static void munmap_sector(void *p, int count)
|
||||
{
|
||||
munmap(p, count << LOG_SECTOR_SEGMENT_SIZE);
|
||||
}
|
||||
|
||||
static void *os_alloc_pages(size_t len)
|
||||
{
|
||||
return mmap_sector(len >> LOG_SECTOR_SEGMENT_SIZE, 0);
|
||||
}
|
||||
|
||||
static void os_free_pages(void *p, size_t len)
|
||||
{
|
||||
munmap_sector(p, len >> LOG_SECTOR_SEGMENT_SIZE);
|
||||
}
|
||||
|
||||
static void *ofm_malloc_zero(size_t len)
|
||||
{
|
||||
return mmap_sector((len >> LOG_SECTOR_SEGMENT_SIZE) + 1, 0);
|
||||
}
|
||||
|
||||
# define APAGE_SIZE SECTOR_SEGMENT_SIZE
|
||||
# define NO_ALLOC_CACHE_FREE
|
||||
# include "../gc2/my_qsort.c"
|
||||
# include "../gc2/alloc_cache.c"
|
||||
static AllocCacheBlock *alloc_cache;
|
||||
|
||||
static void *platform_plain_sector(int count, int executable)
|
||||
{
|
||||
intptr_t sd;
|
||||
|
||||
if (executable)
|
||||
return mmap_sector(count, 1);
|
||||
|
||||
if (!alloc_cache)
|
||||
alloc_cache = alloc_cache_create();
|
||||
|
||||
return alloc_cache_alloc_page(alloc_cache, count << LOG_SECTOR_SEGMENT_SIZE, LOG_SECTOR_SEGMENT_SIZE, 0, &sd);
|
||||
}
|
||||
|
||||
static void free_plain_sector(void *p, int count, int executable)
|
||||
{
|
||||
if (executable) {
|
||||
munmap_sector(p, count);
|
||||
return;
|
||||
}
|
||||
|
||||
(void)alloc_cache_free_page(alloc_cache, p, count << LOG_SECTOR_SEGMENT_SIZE, 1, 1);
|
||||
}
|
||||
|
||||
#define FLUSH_SECTOR_FREES
|
||||
static void flush_freed_sectors()
|
||||
{
|
||||
if (alloc_cache)
|
||||
alloc_cache_flush_freed_pages(alloc_cache);
|
||||
}
|
||||
|
||||
#endif
|
||||
#if GET_MEM_VIA_VIRTUAL_ALLOC
|
||||
static void *platform_plain_sector(int count)
|
||||
static void *platform_plain_sector(int count, int executable)
|
||||
{
|
||||
/* Since 64k blocks are used up by each call to VirtualAlloc,
|
||||
use roughly the same trick as in the malloc-based alloc to
|
||||
|
@ -967,7 +1040,7 @@ static void *platform_plain_sector(int count)
|
|||
prealloced = SECTOR_SEGMENT_GROUP_SIZE;
|
||||
preallocptr = VirtualAlloc(NULL, prealloced << LOG_SECTOR_SEGMENT_SIZE,
|
||||
MEM_COMMIT | MEM_RESERVE,
|
||||
PAGE_READWRITE);
|
||||
executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
|
||||
}
|
||||
|
||||
if (count <= prealloced) {
|
||||
|
@ -979,7 +1052,7 @@ static void *platform_plain_sector(int count)
|
|||
|
||||
return VirtualAlloc(NULL, count << LOG_SECTOR_SEGMENT_SIZE,
|
||||
MEM_COMMIT | MEM_RESERVE,
|
||||
PAGE_READWRITE);
|
||||
executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -991,7 +1064,7 @@ static void *platform_plain_sector(int count)
|
|||
<<
|
||||
# endif
|
||||
|
||||
static void *platform_plain_sector(int count)
|
||||
static void *platform_plain_sector(int count, int executable)
|
||||
{
|
||||
static int prealloced;
|
||||
static void *preallocptr;
|
||||
|
@ -1035,11 +1108,11 @@ static void *platform_plain_sector(int count)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void *malloc_plain_sector(int count)
|
||||
static void *malloc_plain_sector(int count, int executable)
|
||||
{
|
||||
void *m;
|
||||
|
||||
m = platform_plain_sector(count);
|
||||
m = platform_plain_sector(count, executable);
|
||||
|
||||
if (!m) {
|
||||
if (GC_out_of_memory)
|
||||
|
@ -1075,7 +1148,7 @@ static void register_sector(void *naya, int need, intptr_t kind)
|
|||
if (c < 0)
|
||||
c = 0;
|
||||
c = 1 << c;
|
||||
pagetable = (SectorPage *)malloc_plain_sector(c);
|
||||
pagetable = (SectorPage *)malloc_plain_sector(c, 0);
|
||||
sector_pagetables[pagetableindex] = pagetable;
|
||||
sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
for (j = 0; j < SECTOR_LOOKUP_PAGESIZE; j++) {
|
||||
|
@ -1090,7 +1163,7 @@ static void register_sector(void *naya, int need, intptr_t kind)
|
|||
}
|
||||
}
|
||||
|
||||
static void *malloc_sector(intptr_t size, intptr_t kind, int no_new)
|
||||
static void *malloc_sector(intptr_t size, intptr_t kind, int no_new, int executable)
|
||||
{
|
||||
intptr_t need;
|
||||
void *naya;
|
||||
|
@ -1113,7 +1186,7 @@ static void *malloc_sector(intptr_t size, intptr_t kind, int no_new)
|
|||
if (c < 0)
|
||||
c = 0;
|
||||
c = 1 << c;
|
||||
sector_pagetables = (SectorPage **)malloc_plain_sector(c);
|
||||
sector_pagetables = (SectorPage **)malloc_plain_sector(c, 0);
|
||||
sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
for (i = 0; i < (1 << SECTOR_LOOKUP_PAGESETBITS); i++)
|
||||
sector_pagetables[i] = NULL;
|
||||
|
@ -1164,14 +1237,14 @@ static void *malloc_sector(intptr_t size, intptr_t kind, int no_new)
|
|||
if (no_new)
|
||||
return NULL;
|
||||
|
||||
naya = malloc_plain_sector(need);
|
||||
naya = malloc_plain_sector(need, executable);
|
||||
sector_mem_use += (need << LOG_SECTOR_SEGMENT_SIZE);
|
||||
register_sector(naya, need, kind);
|
||||
|
||||
return naya;
|
||||
}
|
||||
|
||||
static void free_sector(void *p)
|
||||
static void free_sector(void *p, int executable)
|
||||
{
|
||||
uintptr_t s = PTR_TO_INT(p), t;
|
||||
int c = 0;
|
||||
|
@ -1206,7 +1279,7 @@ static void free_sector(void *p)
|
|||
#endif
|
||||
|
||||
#if RELEASE_UNUSED_SECTORS
|
||||
free_plain_sector(p, c);
|
||||
free_plain_sector(p, c, executable);
|
||||
sector_mem_use -= (c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
#else
|
||||
sector_free_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
|
||||
|
@ -1283,7 +1356,7 @@ static void *realloc_collect_temp(void *v, intptr_t oldsize, intptr_t newsize)
|
|||
#elif GET_MEM_VIA_MMAP
|
||||
void *naya;
|
||||
|
||||
naya = platform_plain_sector((newsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE);
|
||||
naya = platform_plain_sector((newsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE, 0);
|
||||
memcpy(naya, v, oldsize);
|
||||
if (v)
|
||||
munmap(v, (oldsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE);
|
||||
|
@ -1367,7 +1440,7 @@ static void *malloc_managed(intptr_t size)
|
|||
size += PTR_SIZE - (size & PTR_SIZE);
|
||||
|
||||
if (!managed) {
|
||||
managed = (Managed *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_other, 0);
|
||||
managed = (Managed *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_other, 0, 0);
|
||||
managed->num_buckets = 0;
|
||||
manage_real_mem_use += SECTOR_SEGMENT_SIZE;
|
||||
}
|
||||
|
@ -1383,7 +1456,7 @@ static void *malloc_managed(intptr_t size)
|
|||
if (size < MAX_COMMON_SIZE) {
|
||||
int c;
|
||||
|
||||
mb = (ManagedBlock *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_managed, 0);
|
||||
mb = (ManagedBlock *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_managed, 0, 0);
|
||||
manage_real_mem_use += SECTOR_SEGMENT_SIZE;
|
||||
managed->buckets[i].block = mb;
|
||||
|
||||
|
@ -1394,7 +1467,7 @@ static void *malloc_managed(intptr_t size)
|
|||
managed->buckets[i].offset = c + sizeof(ManagedBlockHeader);
|
||||
} else {
|
||||
intptr_t l = size + sizeof(ManagedBlockHeader) + PTR_SIZE;
|
||||
mb = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0);
|
||||
mb = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0, 0);
|
||||
manage_real_mem_use += l;
|
||||
managed->buckets[i].block = mb;
|
||||
managed->buckets[i].perblock = 1;
|
||||
|
@ -1417,7 +1490,7 @@ static void *malloc_managed(intptr_t size)
|
|||
mb = mb->head.next;
|
||||
if (mb->head.count == perblock) {
|
||||
intptr_t l = offset + size * perblock;
|
||||
mb->head.next = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0);
|
||||
mb->head.next = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0, 0);
|
||||
manage_real_mem_use += l;
|
||||
mb->head.next->head.prev = mb;
|
||||
mb = mb->head.next;
|
||||
|
@ -1488,7 +1561,7 @@ static void free_managed(void *s)
|
|||
|
||||
manage_real_mem_use -= (bucket->offset + bucket->size * bucket->perblock);
|
||||
|
||||
free_sector(mb);
|
||||
free_sector(mb, 0);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -1513,8 +1586,8 @@ static void init_size_map()
|
|||
int i, j, find_half;
|
||||
intptr_t k, next;
|
||||
|
||||
size_index_map = (intptr_t *)malloc_sector(MAX_COMMON_SIZE, sector_kind_other, 0);
|
||||
size_map = (intptr_t *)malloc_sector(NUM_COMMON_SIZE * sizeof(intptr_t), sector_kind_other, 0);
|
||||
size_index_map = (intptr_t *)malloc_sector(MAX_COMMON_SIZE, sector_kind_other, 0, 0);
|
||||
size_map = (intptr_t *)malloc_sector(NUM_COMMON_SIZE * sizeof(intptr_t), sector_kind_other, 0, 0);
|
||||
|
||||
/* This is two loops instead of one to avoid a gcc 2.92.2 -O2 x86 bug: */
|
||||
for (i = 0; i < 8; i++) {
|
||||
|
@ -1612,15 +1685,16 @@ static void GC_initialize(void)
|
|||
int i;
|
||||
|
||||
#if PROVIDE_MALLOC_AND_FREE
|
||||
num_common_sets = 5;
|
||||
num_common_sets = 6;
|
||||
#else
|
||||
num_common_sets = 4;
|
||||
num_common_sets = 5;
|
||||
#endif
|
||||
common_sets = (GC_Set **)malloc_managed(sizeof(GC_Set*) * num_common_sets);
|
||||
|
||||
common_sets[0] = (GC_Set *)malloc_managed(sizeof(GC_Set));
|
||||
common_sets[0]->atomic = 0;
|
||||
common_sets[0]->uncollectable = 0;
|
||||
common_sets[0]->code = 0;
|
||||
common_sets[0]->blocks = common;
|
||||
common_sets[0]->block_ends = common + NUM_COMMON_SIZE;
|
||||
common_sets[0]->othersptr = &others;
|
||||
|
@ -1628,6 +1702,7 @@ static void GC_initialize(void)
|
|||
common_sets[1] = (GC_Set *)malloc_managed(sizeof(GC_Set));
|
||||
common_sets[1]->atomic = !NO_ATOMIC;
|
||||
common_sets[1]->uncollectable = 0;
|
||||
common_sets[1]->code = 0;
|
||||
common_sets[1]->blocks = atomic_common;
|
||||
common_sets[1]->block_ends = atomic_common + NUM_COMMON_SIZE;
|
||||
common_sets[1]->othersptr = &atomic_others;
|
||||
|
@ -1635,6 +1710,7 @@ static void GC_initialize(void)
|
|||
common_sets[2] = (GC_Set *)malloc_managed(sizeof(GC_Set));
|
||||
common_sets[2]->atomic = 0;
|
||||
common_sets[2]->uncollectable = 1;
|
||||
common_sets[2]->code = 1;
|
||||
common_sets[2]->blocks = uncollectable_common;
|
||||
common_sets[2]->block_ends = uncollectable_common + NUM_COMMON_SIZE;
|
||||
common_sets[2]->othersptr = &uncollectable_others;
|
||||
|
@ -1642,17 +1718,27 @@ static void GC_initialize(void)
|
|||
common_sets[3] = (GC_Set *)malloc_managed(sizeof(GC_Set));
|
||||
common_sets[3]->atomic = !NO_ATOMIC;
|
||||
common_sets[3]->uncollectable = 1;
|
||||
common_sets[3]->code = 1;
|
||||
common_sets[3]->blocks = uncollectable_atomic_common;
|
||||
common_sets[3]->block_ends = uncollectable_atomic_common + NUM_COMMON_SIZE;
|
||||
common_sets[3]->othersptr = &uncollectable_atomic_others;
|
||||
|
||||
#if PROVIDE_MALLOC_AND_FREE
|
||||
common_sets[4] = (GC_Set *)malloc_managed(sizeof(GC_Set));
|
||||
common_sets[4]->atomic = 1;
|
||||
common_sets[4]->uncollectable = 1;
|
||||
common_sets[4]->blocks = sys_malloc;
|
||||
common_sets[4]->block_ends = sys_malloc + NUM_COMMON_SIZE;
|
||||
common_sets[4]->othersptr = &sys_malloc_others;
|
||||
common_sets[4]->atomic = 0;
|
||||
common_sets[4]->uncollectable = 0;
|
||||
common_sets[4]->code = 1;
|
||||
common_sets[4]->blocks = code_common;
|
||||
common_sets[4]->block_ends = code_common + NUM_COMMON_SIZE;
|
||||
common_sets[4]->othersptr = &code_others;
|
||||
|
||||
#if PROVIDE_MALLOC_AND_FREE
|
||||
common_sets[5] = (GC_Set *)malloc_managed(sizeof(GC_Set));
|
||||
common_sets[5]->atomic = 1;
|
||||
common_sets[5]->uncollectable = 1;
|
||||
common_sets[5]->code = 0;
|
||||
common_sets[5]->blocks = sys_malloc;
|
||||
common_sets[5]->block_ends = sys_malloc + NUM_COMMON_SIZE;
|
||||
common_sets[5]->othersptr = &sys_malloc_others;
|
||||
#endif
|
||||
|
||||
for (i = 0; i < num_common_sets; i++) {
|
||||
|
@ -1679,7 +1765,7 @@ static void GC_initialize(void)
|
|||
}
|
||||
|
||||
#if PROVIDE_MALLOC_AND_FREE
|
||||
common_sets[4]->name = "Sysmalloc";
|
||||
common_sets[5]->name = "Sysmalloc";
|
||||
#endif
|
||||
|
||||
initialized = 1;
|
||||
|
@ -1976,7 +2062,7 @@ void GC_dump(void)
|
|||
FPRINTF(STDERR,
|
||||
"Set: %s [%s/%s]: ======================================\n",
|
||||
cs->name,
|
||||
cs->atomic ? "atomic" : "pointerful",
|
||||
cs->atomic ? "atomic" : (cs->code ? "code" : "pointerful"),
|
||||
cs->uncollectable ? "eternal" : "collectable");
|
||||
|
||||
for (i = 0; i < NUM_COMMON_SIZE; i++) {
|
||||
|
@ -2119,7 +2205,7 @@ void GC_dump(void)
|
|||
FPRINTF(STDERR,
|
||||
"%12s: %10ld [%s/%s]\n",
|
||||
cs->name, cs->total,
|
||||
cs->atomic ? "atomic" : "pointerful",
|
||||
cs->atomic ? "atomic" : (cs->code ? "code" : "pointerful"),
|
||||
cs->uncollectable ? "eternal" : "collectable");
|
||||
total += cs->total;
|
||||
}
|
||||
|
@ -2188,7 +2274,7 @@ static void init_positions(int cpos, int size, int num_elems)
|
|||
int i, j, pos;
|
||||
int *positions;
|
||||
|
||||
positions = (int *)malloc_sector(num_offsets * sizeof(int), sector_kind_other, 0);
|
||||
positions = (int *)malloc_sector(num_offsets * sizeof(int), sector_kind_other, 0, 0);
|
||||
|
||||
for (i = 0, pos = 0, j = 0; i < num_offsets; ) {
|
||||
positions[i++] = pos;
|
||||
|
@ -2360,12 +2446,14 @@ static void *do_malloc(SET_NO_BACKINFO
|
|||
|
||||
cpos = 0;
|
||||
|
||||
a = malloc_sector(size + size_align(sizeof(MemoryChunk)), sector_kind_chunk, 1);
|
||||
a = malloc_sector(size + size_align(sizeof(MemoryChunk)), sector_kind_chunk, 1,
|
||||
flags & do_malloc_EXECUTABLE);
|
||||
if (!a) {
|
||||
if (mem_use >= mem_limit)
|
||||
GC_gcollect();
|
||||
|
||||
a = malloc_sector(size + size_align(sizeof(MemoryChunk)), sector_kind_chunk, 0);
|
||||
a = malloc_sector(size + size_align(sizeof(MemoryChunk)), sector_kind_chunk, 0,
|
||||
flags & do_malloc_EXECUTABLE);
|
||||
}
|
||||
|
||||
c = (MemoryChunk *)a;
|
||||
|
@ -2463,14 +2551,16 @@ static void *do_malloc(SET_NO_BACKINFO
|
|||
c = size_align(sizeof(BlockOfMemory)) + (PTR_SIZE - 1) + sizeElemBit;
|
||||
}
|
||||
|
||||
block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 1);
|
||||
block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 1,
|
||||
flags & do_malloc_EXECUTABLE);
|
||||
if (!block) {
|
||||
if ((mem_use >= mem_limit) && !GC_dont_gc) {
|
||||
GC_gcollect();
|
||||
return do_malloc(KEEP_SET_INFO_ARG(set_no)
|
||||
size, common, othersptr, flags);
|
||||
} else
|
||||
block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 0);
|
||||
block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 0,
|
||||
flags & do_malloc_EXECUTABLE);
|
||||
}
|
||||
|
||||
|
||||
|
@ -2675,6 +2765,13 @@ void *GC_malloc_atomic_uncollectable(size_t size)
|
|||
do_malloc_ATOMIC_UNLESS_DISABLED | do_malloc_UNCOLLECTABLE);
|
||||
}
|
||||
|
||||
void *GC_malloc_code(size_t size)
|
||||
{
|
||||
return do_malloc(KEEP_SET_INFO_ARG(4)
|
||||
size, code_common, &code_others,
|
||||
do_malloc_EXECUTABLE);
|
||||
}
|
||||
|
||||
void *GC_malloc_specific(size_t size, GC_Set *set)
|
||||
{
|
||||
return do_malloc(KEEP_SET_INFO_ARG(set->no)
|
||||
|
@ -2691,7 +2788,7 @@ void *GC_malloc_stubborn(size_t size)
|
|||
#if PROVIDE_MALLOC_AND_FREE
|
||||
void *malloc(size_t size)
|
||||
{
|
||||
return do_malloc(KEEP_SET_INFO_ARG(4)
|
||||
return do_malloc(KEEP_SET_INFO_ARG(5)
|
||||
size, sys_malloc,
|
||||
&sys_malloc_others,
|
||||
do_malloc_ATOMIC | do_malloc_UNCOLLECTABLE);
|
||||
|
@ -2990,7 +3087,7 @@ static void free_chunk(MemoryChunk *k, MemoryChunk **prev, GC_Set *set)
|
|||
|
||||
*prev = next;
|
||||
|
||||
free_sector(k);
|
||||
free_sector(k, set->code);
|
||||
--num_chunks;
|
||||
}
|
||||
|
||||
|
@ -3033,7 +3130,7 @@ void GC_free(void *p)
|
|||
return;
|
||||
}
|
||||
# if EXTRA_FREE_CHECKS
|
||||
if (block->set_no != 4) {
|
||||
if (block->set_no != 5) {
|
||||
char b[256];
|
||||
sprintf(b, "GC_free on ptr from wrong block! %lx\n", (intptr_t)p);
|
||||
free_error(b);
|
||||
|
@ -3085,7 +3182,7 @@ void GC_free(void *p)
|
|||
GC_initialize();
|
||||
|
||||
# if CHECK_FREES && EXTRA_FREE_CHECKS
|
||||
if (chunk->set_no != 4) {
|
||||
if (chunk->set_no != 5) {
|
||||
char b[256];
|
||||
sprintf(b, "GC_free on ptr from wrong block! %lx\n", (intptr_t)p);
|
||||
free_error(b);
|
||||
|
@ -3413,7 +3510,7 @@ static void collect_finish_common(BlockOfMemory **blocks,
|
|||
--num_blocks;
|
||||
|
||||
*prev = block->next;
|
||||
free_sector(block);
|
||||
free_sector(block, set->code);
|
||||
mem_real_use -= SECTOR_SEGMENT_SIZE;
|
||||
block = *prev;
|
||||
} else {
|
||||
|
@ -4625,6 +4722,10 @@ static void do_GC_gcollect(void *stack_now)
|
|||
common_sets[j]);
|
||||
}
|
||||
|
||||
#ifdef FLUSH_SECTOR_FREES
|
||||
flush_freed_sectors();
|
||||
#endif
|
||||
|
||||
PRINTTIME((STDERR, "gc: all done: %ld\n", GETTIMEREL()));
|
||||
|
||||
# if PRINT
|
||||
|
|
|
@ -45,6 +45,7 @@ SGC_EXTERN void *GC_malloc_atomic(size_t size_in_bytes);
|
|||
SGC_EXTERN void *GC_malloc_stubborn(size_t size_in_bytes);
|
||||
SGC_EXTERN void *GC_malloc_uncollectable(size_t size_in_bytes);
|
||||
SGC_EXTERN void *GC_malloc_atomic_uncollectable(size_t size_in_bytes);
|
||||
SGC_EXTERN void *GC_malloc_code(size_t size_in_bytes);
|
||||
|
||||
typedef void (*GC_collect_start_callback_Proc)(void);
|
||||
typedef void (*GC_collect_end_callback_Proc)(void);
|
||||
|
|
|
@ -1271,9 +1271,13 @@ static uintptr_t jit_prev_page = 0, jit_prev_length = 0;
|
|||
void *scheme_malloc_gcable_code(intptr_t size)
|
||||
{
|
||||
void *p;
|
||||
|
||||
#ifdef USE_SENORA_GC
|
||||
p = GC_malloc_code(size);
|
||||
#else
|
||||
p = scheme_malloc(size);
|
||||
|
||||
#if defined(MZ_CODE_ALLOC_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
|
||||
# if defined(MZ_CODE_ALLOC_USE_MPROTECT) || defined(MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC)
|
||||
{
|
||||
/* [This chunk of code moved from our copy of GNU lightning to here.] */
|
||||
uintptr_t page, length, page_size;
|
||||
|
@ -1290,12 +1294,12 @@ void *scheme_malloc_gcable_code(intptr_t size)
|
|||
chunk of memory is used to compile multiple functions. */
|
||||
if (!(page >= jit_prev_page && page + length <= jit_prev_page + jit_prev_length)) {
|
||||
|
||||
# ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
|
||||
# ifdef MZ_JIT_USE_WINDOWS_VIRTUAL_ALLOC
|
||||
{
|
||||
DWORD old;
|
||||
VirtualProtect((void *)page, length, PAGE_EXECUTE_READWRITE, &old);
|
||||
}
|
||||
# else
|
||||
# else
|
||||
{
|
||||
int r;
|
||||
r = mprotect ((void *) page, length, PROT_READ | PROT_WRITE | PROT_EXEC);
|
||||
|
@ -1303,7 +1307,7 @@ void *scheme_malloc_gcable_code(intptr_t size)
|
|||
scheme_log_abort("mprotect for generate-code page failed; aborting");
|
||||
}
|
||||
}
|
||||
# endif
|
||||
# endif
|
||||
|
||||
/* See if we can extend the previously mprotect'ed memory area towards
|
||||
higher addresses: the starting address remains the same as before. */
|
||||
|
@ -1321,6 +1325,7 @@ void *scheme_malloc_gcable_code(intptr_t size)
|
|||
jit_prev_page = page, jit_prev_length = length;
|
||||
}
|
||||
}
|
||||
# endif
|
||||
#endif
|
||||
|
||||
return p;
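The last hunk above is the consumer side: with USE_SENORA_GC, scheme_malloc_gcable_code() now gets its memory from GC_malloc_code() instead of mprotect()ing a scheme_malloc() result. A minimal sketch of calling the new entry point directly follows; the wrapper, its name, and the header path are illustrative assumptions, not part of the commit.

#include <string.h>
#include "sgc.h"   /* declares GC_malloc_code(size_t), added by this commit */

/* Copy already-generated machine code into SGC's executable ("code") set.
   Instruction-cache flushing and other architecture-specific fixups are
   out of scope for this sketch. */
void *copy_to_code_memory(const unsigned char *code, size_t len)
{
  void *p = GC_malloc_code(len);
  if (p != NULL)
    memcpy(p, code, len);
  return p;
}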