Variable Sized OBJHEAD

svn: r15010
This commit is contained in:
Kevin Tew 2009-05-29 19:39:29 +00:00
parent 95c52b9ce3
commit 12cdeff2b7
7 changed files with 345 additions and 224 deletions

View File

@ -314,14 +314,47 @@ foreign.@LTO@: $(XSRCDIR)/foreign.c
main.@LTO@: $(XSRCDIR)/main.c main.@LTO@: $(XSRCDIR)/main.c
$(CC) $(CFLAGS) -c $(XSRCDIR)/main.c -o main.@LTO@ $(CC) $(CFLAGS) -c $(XSRCDIR)/main.c -o main.@LTO@
gc2.@LTO@: $(srcdir)/gc2.c $(srcdir)/newgc.c $(srcdir)/gc2.h \ gc2.@LTO@: \
$(srcdir)/newgc.h $(srcdir)/mem_account.c \ $(srcdir)/alloc_cache.c \
$(srcdir)/sighand.c \ $(srcdir)/backtrace.c \
$(srcdir)/vm_osx.c $(srcdir)/vm_mmap.c $(srcdir)/vm_osk.c $(srcdir)/vm.c\ $(srcdir)/commongc_internal.h \
$(srcdir)/vm_memalign.c $(srcdir)/alloc_cache.c \ $(srcdir)/platforms.h \
$(srcdir)/page_range.c $(srcdir)/protect_range.c $(srcdir)/var_stack.c $(srcdir)/stack_comp.c \ $(srcdir)/fnls.c \
$(srcdir)/../utils/splay.c $(srcdir)/my_qsort.c $(srcdir)/backtrace.c \ $(srcdir)/gc2.c \
$(srcdir)/weak.c $(srcdir)/fnls.c $(srcdir)/../include/scheme.h $(srcdir)/../src/schpriv.h $(srcdir)/gc2.h \
$(srcdir)/gc2_dump.h \
$(srcdir)/gc2_obj.h \
$(srcdir)/immobile_boxes.c \
$(srcdir)/../include/scheme.h \
$(srcdir)/../include/../sconfig.h \
$(srcdir)/../include/../mzconfig.h \
$(srcdir)/../include/../uconfig.h \
$(srcdir)/../include/../src/stypes.h \
$(srcdir)/../include/../src/schexn.h \
$(srcdir)/../include/../gc2/gc2.h \
$(srcdir)/../include/../src/schemef.h \
$(srcdir)/../mzconfig.h \
$(srcdir)/../src/mzrt.h \
$(srcdir)/../src/schpriv.h \
$(srcdir)/../utils/splay.c \
$(srcdir)/mem_account.c \
$(srcdir)/msgprint.c \
$(srcdir)/my_qsort.c \
$(srcdir)/newgc.c \
$(srcdir)/newgc.h \
$(srcdir)/page_range.c \
$(srcdir)/protect_range.c \
$(srcdir)/rlimit_heapsize.c \
$(srcdir)/roots.c \
$(srcdir)/stack_comp.c \
$(srcdir)/sighand.c \
$(srcdir)/var_stack.c \
$(srcdir)/vm.c \
$(srcdir)/vm_memalign.c \
$(srcdir)/vm_mmap.c \
$(srcdir)/vm_osk.c \
$(srcdir)/vm_osx.c \
$(srcdir)/weak.c
$(CC) $(CFLAGS) -I$(builddir)/.. -c $(srcdir)/gc2.c -o gc2.@LTO@ $(CC) $(CFLAGS) -I$(builddir)/.. -c $(srcdir)/gc2.c -o gc2.@LTO@
FOREIGN_OBJS = ../../foreign/gcc/libffi/src/*.@LTO@ ../../foreign/gcc/libffi/src/*/*.@LTO@ FOREIGN_OBJS = ../../foreign/gcc/libffi/src/*.@LTO@ ../../foreign/gcc/libffi/src/*/*.@LTO@

View File

@ -36,6 +36,12 @@ typedef unsigned long (*GC_get_thread_stack_base_Proc)(void);
#endif #endif
#ifdef MZ_USE_PLACES
# define GC_OBJHEAD_SIZE (2*sizeof(unsigned long))
#else
# define GC_OBJHEAD_SIZE (sizeof(unsigned long))
#endif
#ifndef GC2_JUST_MACROS #ifndef GC2_JUST_MACROS
#include <stddef.h> #include <stddef.h>

View File

@ -7,29 +7,30 @@
#else #else
# define LOG_APAGE_SIZE 14 # define LOG_APAGE_SIZE 14
#endif #endif
typedef struct objhead {
#ifdef SIXTY_FOUR_BIT_INTEGERS # ifdef MZ_USE_PLACES
# define OBJH_WORD_SIZE 8 unsigned long filler;
#else
# define OBJH_WORD_SIZE 4
#endif #endif
unsigned long hash : ((8 * sizeof(unsigned long)) - (4+3+LOG_APAGE_SIZE) );
struct objhead {
unsigned long hash : ((8*OBJH_WORD_SIZE) - (4+3+LOG_APAGE_SIZE));
/* the type and size of the object */ /* the type and size of the object */
unsigned long type : 3; unsigned long type : 3;
/* these are the various mark bits we use */ /* these are the various mark bits we use */
unsigned long mark : 1; unsigned long mark : 1;
unsigned long btc_mark : 1; unsigned long btc_mark : 1;
/* these are used for compaction et al*/ /* these are used for compaction et al*/
unsigned long moved : 1; unsigned long moved : 1;
unsigned long dead : 1; unsigned long dead : 1;
unsigned long size : LOG_APAGE_SIZE; unsigned long size : LOG_APAGE_SIZE;
}; } objhead;
#define OBJHEAD_SIZE (sizeof(objhead))
#define OBJPTR_TO_OBJHEAD(p) ((objhead *) (((char *)(p)) - OBJHEAD_SIZE))
#define OBJHEAD_TO_OBJPTR(p) ((void *) (((char *)(p)) + OBJHEAD_SIZE))
XFORM_NONGCING extern int GC_is_allocated(void *p); XFORM_NONGCING extern int GC_is_allocated(void *p);
#define OBJHEAD_HAS_HASH_BITS #define OBJHEAD_HAS_HASH_BITS
#define OBJHEAD_HASH_BITS(p) ((struct objhead *)((void **)p - 1))->hash #define OBJHEAD_HASH_BITS(p) (OBJPTR_TO_OBJHEAD(p)->hash)
#endif #endif

View File

@ -242,7 +242,7 @@ inline static void BTC_memory_account_mark(NewGC *gc, mpage *page, void *ptr)
if(page->size_class) { if(page->size_class) {
if(page->size_class > 1) { if(page->size_class > 1) {
/* big page */ /* big page */
struct objhead *info = (struct objhead *)(NUM(page->addr) + PREFIX_SIZE); objhead *info = (objhead *)(NUM(page->addr) + PREFIX_SIZE);
if(info->btc_mark == gc->old_btc_mark) { if(info->btc_mark == gc->old_btc_mark) {
info->btc_mark = gc->new_btc_mark; info->btc_mark = gc->new_btc_mark;
@ -251,17 +251,17 @@ inline static void BTC_memory_account_mark(NewGC *gc, mpage *page, void *ptr)
} }
} else { } else {
/* medium page */ /* medium page */
struct objhead *info = MED_OBJHEAD(ptr, page->size); objhead *info = MED_OBJHEAD(ptr, page->size);
if(info->btc_mark == gc->old_btc_mark) { if(info->btc_mark == gc->old_btc_mark) {
info->btc_mark = gc->new_btc_mark; info->btc_mark = gc->new_btc_mark;
account_memory(gc, gc->current_mark_owner, info->size); account_memory(gc, gc->current_mark_owner, info->size);
ptr = PTR(NUM(info) + WORD_SIZE); ptr = OBJHEAD_TO_OBJPTR(info);
push_ptr(ptr); push_ptr(ptr);
} }
} }
} else { } else {
struct objhead *info = (struct objhead *)((char*)ptr - WORD_SIZE); objhead *info = OBJPTR_TO_OBJHEAD(ptr);
if(info->btc_mark == gc->old_btc_mark) { if(info->btc_mark == gc->old_btc_mark) {
info->btc_mark = gc->new_btc_mark; info->btc_mark = gc->new_btc_mark;
@ -302,7 +302,7 @@ int BTC_thread_mark(void *p)
{ {
NewGC *gc = GC_get_GC(); NewGC *gc = GC_get_GC();
if (gc->doing_memory_accounting) { if (gc->doing_memory_accounting) {
return ((struct objhead *)(NUM(p) - WORD_SIZE))->size; return OBJPTR_TO_OBJHEAD(p)->size;
} }
return gc->mark_table[btc_redirect_thread](p); return gc->mark_table[btc_redirect_thread](p);
} }
@ -314,7 +314,7 @@ int BTC_custodian_mark(void *p)
if(custodian_to_owner_set(gc, p) == gc->current_mark_owner) if(custodian_to_owner_set(gc, p) == gc->current_mark_owner)
return gc->mark_table[btc_redirect_custodian](p); return gc->mark_table[btc_redirect_custodian](p);
else else
return ((struct objhead *)(NUM(p) - WORD_SIZE))->size; return OBJPTR_TO_OBJHEAD(p)->size;
} }
return gc->mark_table[btc_redirect_custodian](p); return gc->mark_table[btc_redirect_custodian](p);
} }
@ -323,7 +323,7 @@ int BTC_cust_box_mark(void *p)
{ {
NewGC *gc = GC_get_GC(); NewGC *gc = GC_get_GC();
if (gc->doing_memory_accounting) { if (gc->doing_memory_accounting) {
return ((struct objhead *)(NUM(p) - WORD_SIZE))->size; return OBJPTR_TO_OBJHEAD(p)->size;
} }
return gc->mark_table[btc_redirect_cust_box](p); return gc->mark_table[btc_redirect_cust_box](p);
} }
@ -343,16 +343,18 @@ inline static void mark_normal_obj(NewGC *gc, int type, void *ptr)
} }
case PAGE_ATOMIC: break; case PAGE_ATOMIC: break;
case PAGE_ARRAY: { case PAGE_ARRAY: {
struct objhead *info = (struct objhead *)((char*)ptr - WORD_SIZE); objhead *info = OBJPTR_TO_OBJHEAD(ptr);
void **temp = ptr, **end = temp + (info->size - 1); void **temp = ptr;
void **end = PPTR(info) + info->size;
while(temp < end) gcMARK(*(temp++)); while(temp < end) gcMARK(*(temp++));
break; break;
}; };
case PAGE_TARRAY: { case PAGE_TARRAY: {
struct objhead *info = (struct objhead *)((char*)ptr - WORD_SIZE); objhead *info = OBJPTR_TO_OBJHEAD(ptr);
unsigned short tag = *(unsigned short*)ptr; unsigned short tag = *(unsigned short*)ptr;
void **temp = ptr, **end = PPTR(info) + (info->size - INSET_WORDS); void **temp = ptr;
void **end = PPTR(info) + (info->size - INSET_WORDS);
while(temp < end) temp += gc->mark_table[tag](temp); while(temp < end) temp += gc->mark_table[tag](temp);
break; break;
@ -363,7 +365,7 @@ inline static void mark_normal_obj(NewGC *gc, int type, void *ptr)
inline static void mark_acc_big_page(NewGC *gc, mpage *page) inline static void mark_acc_big_page(NewGC *gc, mpage *page)
{ {
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
void **end = PPTR(NUM(page->addr) + page->size); void **end = PPTR(NUM(page->addr) + page->size);
switch(page->page_type) { switch(page->page_type) {
@ -407,8 +409,8 @@ static void propagate_accounting_marks(NewGC *gc)
if (page->size_class > 1) if (page->size_class > 1)
mark_acc_big_page(gc, page); mark_acc_big_page(gc, page);
else { else {
struct objhead *info = MED_OBJHEAD(p, page->size); objhead *info = MED_OBJHEAD(p, page->size);
p = PTR(NUM(info) + WORD_SIZE); p = OBJHEAD_TO_OBJPTR(info);
mark_normal_obj(gc, info->type, p); mark_normal_obj(gc, info->type, p);
} }
} else } else
@ -701,6 +703,6 @@ static inline void BTC_clean_up(NewGC *gc) {
} }
static inline void BTC_set_btc_mark(NewGC *gc, void* x) { static inline void BTC_set_btc_mark(NewGC *gc, void* x) {
((struct objhead *)(x))->btc_mark = gc->old_btc_mark; ((objhead *)(x))->btc_mark = gc->old_btc_mark;
} }
#endif #endif

View File

@ -426,7 +426,10 @@ int GC_is_allocated(void *p)
the "- 3" is basically used as a fudge/safety factor, and has no real, the "- 3" is basically used as a fudge/safety factor, and has no real,
important meaning. */ important meaning. */
#define MAX_OBJECT_SIZEW (gcBYTES_TO_WORDS(APAGE_SIZE) - PREFIX_WSIZE - 3) #define MAX_OBJECT_SIZEW (gcBYTES_TO_WORDS(APAGE_SIZE) - PREFIX_WSIZE - 3)
#define MAX_OBJECT_SIZE (gcWORDS_TO_BYTES(MAX_OBJECT_SIZEW))
#define ASSERT_TAG(tag) assert((tag) >= 0 && (tag) <= NUMBER_OF_TAGS)
#define ASSERT_VALID_OBJPTR(objptr) assert(!((long)(objptr) & (0x3)))
/* Generation 0. Generation 0 is a set of very large pages in a list(gc->gen0.pages), /* Generation 0. Generation 0 is a set of very large pages in a list(gc->gen0.pages),
plus a set of smaller bigpages in a separate list(gc->gen0.big_pages). plus a set of smaller bigpages in a separate list(gc->gen0.big_pages).
@ -468,16 +471,52 @@ static void free_mpage(struct mpage *page)
static inline int BTC_single_allocation_limit(NewGC *gc, size_t sizeb); static inline int BTC_single_allocation_limit(NewGC *gc, size_t sizeb);
#endif #endif
/* ALIGN_BYTES_SIZE DOES NOT assume that the argument is already word-aligned. */
/* INSET_WORDS is how many words in a tagged array can be padding, plus one; it
must also be no more than the minimum size of a tagged element. */
#ifdef GC_ALIGN_SIXTEEN
# ifdef SIXTY_FOUR_BIT_INTEGERS
# define ALIGN_SIZE(sizew) (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & ((2 * WORD_SIZE) -1)) ? ((sizeb) + ((2 * WORD_SIZE) - ((sizeb) & ((2 * WORD_SIZE) - 1)))) : (sizeb))
# define INSET_WORDS 1
# else
# define ALIGN_SIZE(sizew) (((sizew) & 0x3) ? ((sizew) + (4 - ((sizew) & 0x3))) : (sizew))
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & ((4 * WORD_SIZE) - 1)) ? ((sizeb) + ((4 * WORD_SIZE) - ((sizeb) & ((4 * WORD_SIZE) - 1)))) : (sizeb))
# define INSET_WORDS 3
# endif
#else
# ifdef GC_ALIGN_EIGHT
# ifdef SIXTY_FOUR_BIT_INTEGERS
# define ALIGN_SIZE(sizew) (sizew)
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & (WORD_SIZE -1)) ? ((sizeb) + (WORD_SIZE - ((sizeb) & (WORD_SIZE - 1)))) : (sizeb))
# define INSET_WORDS 0
# else
# define ALIGN_SIZE(sizew) (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & ((2 * WORD_SIZE) -1)) ? ((sizeb) + ((2 * WORD_SIZE) - ((sizeb) & ((2 * WORD_SIZE) - 1)))) : (sizeb))
# define INSET_WORDS 1
# endif
# else
# define ALIGN_SIZE(sizew) (sizew)
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & (3)) ? ((sizeb) + (4 - ((sizeb) & (3)))) : (sizeb))
# define INSET_WORDS 0
# endif
#endif
#define COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(s) (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE))
#define COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(s) (ALIGN_BYTES_SIZE((s) + OBJHEAD_SIZE + PREFIX_SIZE))
#define BIG_PAGE_TO_OBJECT(big_page) ((void *) (((char *)((big_page)->addr)) + OBJHEAD_SIZE + PREFIX_SIZE))
#define MED_OBJHEAD_TO_OBJECT(ptr, page_size) ((void*) (((char *)MED_OBJHEAD((ptr), (page_size))) + OBJHEAD_SIZE));
/* the core allocation functions */ /* the core allocation functions */
static void *allocate_big(size_t sizeb, int type) static void *allocate_big(const size_t request_size_bytes, int type)
{ {
NewGC *gc = GC_get_GC(); NewGC *gc = GC_get_GC();
mpage *bpage; mpage *bpage;
void *addr; size_t allocate_size;
#ifdef NEWGC_BTC_ACCOUNT #ifdef NEWGC_BTC_ACCOUNT
if(GC_out_of_memory) { if(GC_out_of_memory) {
if (BTC_single_allocation_limit(gc, sizeb)) { if (BTC_single_allocation_limit(gc, request_size_bytes)) {
/* We're allowed to fail. Check for allocations that exceed a single-time /* We're allowed to fail. Check for allocations that exceed a single-time
limit. Otherwise, the limit doesn't work as intended, because limit. Otherwise, the limit doesn't work as intended, because
a program can allocate a large block that nearly exhausts memory, a program can allocate a large block that nearly exhausts memory,
@ -494,24 +533,23 @@ static void *allocate_big(size_t sizeb, int type)
plus one word for the object header. plus one word for the object header.
This last serves many purposes, including making sure the object is This last serves many purposes, including making sure the object is
aligned for Sparcs. */ aligned for Sparcs. */
sizeb = gcWORDS_TO_BYTES((gcBYTES_TO_WORDS(sizeb) + PREFIX_WSIZE + 1)); allocate_size = COMPUTE_ALLOC_SIZE_FOR_BIG_PAGE_SIZE(request_size_bytes);
if((gc->gen0.current_size + sizeb) >= gc->gen0.max_size) { if((gc->gen0.current_size + allocate_size) >= gc->gen0.max_size) {
if (!gc->dumping_avoid_collection) if (!gc->dumping_avoid_collection)
garbage_collect(gc, 0); garbage_collect(gc, 0);
} }
gc->gen0.current_size += sizeb; gc->gen0.current_size += allocate_size;
/* We not only need APAGE_SIZE alignment, we /* We not only need APAGE_SIZE alignment, we
need everything consisently mapped within an APAGE_SIZE need everything consisently mapped within an APAGE_SIZE
segment. So round up. */ segment. So round up. */
bpage = malloc_mpage(); bpage = malloc_mpage();
if (type == PAGE_ATOMIC) if (type == PAGE_ATOMIC)
addr = malloc_dirty_pages(gc, round_to_apage_size(sizeb), APAGE_SIZE); bpage->addr = malloc_dirty_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE);
else else
addr = malloc_pages(gc, round_to_apage_size(sizeb), APAGE_SIZE); bpage->addr = malloc_pages(gc, round_to_apage_size(allocate_size), APAGE_SIZE);
bpage->addr = addr; bpage->size = allocate_size;
bpage->size = sizeb;
bpage->size_class = 2; bpage->size_class = 2;
bpage->page_type = type; bpage->page_type = type;
@ -521,40 +559,13 @@ static void *allocate_big(size_t sizeb, int type)
gc->gen0.big_pages = bpage; gc->gen0.big_pages = bpage;
pagemap_add(gc->page_maps, bpage); pagemap_add(gc->page_maps, bpage);
return PTR(NUM(addr) + PREFIX_SIZE + WORD_SIZE); {
void * objptr = BIG_PAGE_TO_OBJECT(bpage);
ASSERT_VALID_OBJPTR(objptr);
return objptr;
}
} }
/* ALIGN_BYTES_SIZE can assume that the argument is already word-aligned. */
/* INSET_WORDS is how many words in a tagged array can be padding, plus one; it
must also be no more than the minimum size of a tagged element. */
#ifdef GC_ALIGN_SIXTEEN
# ifdef SIXTY_FOUR_BIT_INTEGERS
# define ALIGN_SIZE(sizew) (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & WORD_SIZE) ? ((sizeb) + WORD_SIZE) : (sizeb))
# define INSET_WORDS 1
# else
# define ALIGN_SIZE(sizew) (((sizew) & 0x3) ? ((sizew) + (4 - ((sizew) & 0x3))) : (sizew))
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & (3 * WORD_SIZE)) ? ((sizeb) + ((4 * WORD_SIZE) - ((sizeb) & (3 * WORD_SIZE)))) : (sizeb))
# define INSET_WORDS 3
# endif
#else
# ifdef GC_ALIGN_EIGHT
# ifdef SIXTY_FOUR_BIT_INTEGERS
# define ALIGN_SIZE(sizew) (sizew)
# define ALIGN_BYTES_SIZE(sizeb) (sizeb)
# define INSET_WORDS 0
# else
# define ALIGN_SIZE(sizew) (((sizew) & 0x1) ? ((sizew) + 1) : (sizew))
# define ALIGN_BYTES_SIZE(sizeb) (((sizeb) & WORD_SIZE) ? ((sizeb) + WORD_SIZE) : (sizeb))
# define INSET_WORDS 1
# endif
# else
# define ALIGN_SIZE(sizew) (sizew)
# define ALIGN_BYTES_SIZE(sizeb) (sizeb)
# define INSET_WORDS 0
# endif
#endif
static void *allocate_medium(size_t sizeb, int type) static void *allocate_medium(size_t sizeb, int type)
{ {
NewGC *gc; NewGC *gc;
@ -572,7 +583,7 @@ static void *allocate_medium(size_t sizeb, int type)
} }
sz += WORD_SIZE; /* add trailing word, in case pointer is to end */ sz += WORD_SIZE; /* add trailing word, in case pointer is to end */
sz += WORD_SIZE; /* room for objhead */ sz += OBJHEAD_SIZE; /* room for objhead */
sz = ALIGN_BYTES_SIZE(sz); sz = ALIGN_BYTES_SIZE(sz);
gc = GC_get_GC(); gc = GC_get_GC();
@ -587,8 +598,8 @@ static void *allocate_medium(size_t sizeb, int type)
info->type = type; info->type = type;
page->previous_size = (n + sz); page->previous_size = (n + sz);
page->live_size += sz; page->live_size += sz;
p = PTR(NUM(info) + WORD_SIZE); p = OBJHEAD_TO_OBJPTR(info);
memset(p, 0, sz - WORD_SIZE); memset(p, 0, sz - OBJHEAD_SIZE);
return p; return p;
} }
n += sz; n += sz;
@ -626,7 +637,11 @@ static void *allocate_medium(size_t sizeb, int type)
info->dead = 0; info->dead = 0;
info->type = type; info->type = type;
return PTR(NUM(info) + WORD_SIZE); {
void * objptr = OBJHEAD_TO_OBJPTR(info);
ASSERT_VALID_OBJPTR(objptr);
return objptr;
}
} }
inline static struct mpage *gen0_create_new_mpage(NewGC *gc) { inline static struct mpage *gen0_create_new_mpage(NewGC *gc) {
@ -654,23 +669,24 @@ inline static size_t gen0_size_in_use(NewGC *gc) {
return (gc->gen0.current_size + ((GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr)) - PREFIX_SIZE)); return (gc->gen0.current_size + ((GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr)) - PREFIX_SIZE));
} }
inline static void *allocate(size_t sizeb, int type) #define BYTES_MULTIPLE_OF_WORD_TO_WORDS(sizeb) ((sizeb) >> gcLOG_WORD_SIZE)
inline static void *allocate(const size_t request_size, const int type)
{ {
size_t sizew; size_t allocate_size;
unsigned long newptr; unsigned long newptr;
NewGC *gc;
if(sizeb == 0) return zero_sized; if(request_size == 0) return zero_sized;
sizew = ALIGN_SIZE(( gcBYTES_TO_WORDS(sizeb) + 1)); allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);
if(sizew > MAX_OBJECT_SIZEW) return allocate_big(sizeb, type); if(allocate_size > MAX_OBJECT_SIZE) return allocate_big(request_size, type);
sizeb = gcWORDS_TO_BYTES(sizew);
/* ensure that allocation will fit in a gen0 page */ /* ensure that allocation will fit in a gen0 page */
newptr = GC_gen0_alloc_page_ptr + sizeb; newptr = GC_gen0_alloc_page_ptr + allocate_size;
ASSERT_VALID_OBJPTR(newptr);
while (OVERFLOWS_GEN0(newptr)) { while (OVERFLOWS_GEN0(newptr)) {
gc = GC_get_GC(); NewGC *gc = GC_get_GC();
/* bring page size used up to date */ /* bring page size used up to date */
gc->gen0.curr_alloc_page->size = GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr); gc->gen0.curr_alloc_page->size = GC_gen0_alloc_page_ptr - NUM(gc->gen0.curr_alloc_page->addr);
gc->gen0.current_size += gc->gen0.curr_alloc_page->size; gc->gen0.current_size += gc->gen0.curr_alloc_page->size;
@ -679,6 +695,7 @@ inline static void *allocate(size_t sizeb, int type)
if(gc->gen0.curr_alloc_page->next) { if(gc->gen0.curr_alloc_page->next) {
gc->gen0.curr_alloc_page = gc->gen0.curr_alloc_page->next; gc->gen0.curr_alloc_page = gc->gen0.curr_alloc_page->next;
GC_gen0_alloc_page_ptr = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size; GC_gen0_alloc_page_ptr = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size;
ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE; GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE;
} }
/* WARNING: tries to avoid a collection but /* WARNING: tries to avoid a collection but
@ -692,103 +709,112 @@ inline static void *allocate(size_t sizeb, int type)
gc->gen0.curr_alloc_page = new_mpage; gc->gen0.curr_alloc_page = new_mpage;
GC_gen0_alloc_page_ptr = NUM(new_mpage->addr); GC_gen0_alloc_page_ptr = NUM(new_mpage->addr);
ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
GC_gen0_alloc_page_end = NUM(new_mpage->addr) + GEN0_PAGE_SIZE; GC_gen0_alloc_page_end = NUM(new_mpage->addr) + GEN0_PAGE_SIZE;
} }
else { else {
garbage_collect(gc, 0); garbage_collect(gc, 0);
} }
newptr = GC_gen0_alloc_page_ptr + sizeb; newptr = GC_gen0_alloc_page_ptr + allocate_size;
ASSERT_VALID_OBJPTR(newptr);
} }
/* actual Allocation */ /* actual Allocation */
{ {
struct objhead *info; objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);
void *retval = PTR(GC_gen0_alloc_page_ptr);
GC_gen0_alloc_page_ptr = newptr; GC_gen0_alloc_page_ptr = newptr;
if (type == PAGE_ATOMIC) if (type == PAGE_ATOMIC)
*((void **)retval) = NULL; /* init objhead */ memset(info, 0, sizeof(objhead)); /* init objhead */
else else
bzero(retval, sizeb); bzero(info, allocate_size);
info = (struct objhead *)retval;
info->type = type; info->type = type;
info->size = sizew; info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumbed us up to the next word boundary */
{
return PTR(NUM(retval) + WORD_SIZE); void * objptr = OBJHEAD_TO_OBJPTR(info);
ASSERT_VALID_OBJPTR(objptr);
return objptr;
}
} }
} }
inline static void *fast_malloc_one_small_tagged(size_t sizeb, int dirty)
inline static void *fast_malloc_one_small_tagged(size_t request_size, int dirty)
{ {
unsigned long newptr; unsigned long newptr;
const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);
sizeb += WORD_SIZE; newptr = GC_gen0_alloc_page_ptr + allocate_size;
sizeb = ALIGN_BYTES_SIZE(sizeb); ASSERT_VALID_OBJPTR(newptr);
newptr = GC_gen0_alloc_page_ptr + sizeb;
if(OVERFLOWS_GEN0(newptr)) { if(OVERFLOWS_GEN0(newptr)) {
return GC_malloc_one_tagged(sizeb - WORD_SIZE); return GC_malloc_one_tagged(request_size);
} else { } else {
void *retval = PTR(GC_gen0_alloc_page_ptr); objhead *info = (objhead *)PTR(GC_gen0_alloc_page_ptr);
GC_gen0_alloc_page_ptr = newptr; GC_gen0_alloc_page_ptr = newptr;
if (dirty) if (dirty)
*((void **)retval) = NULL; /* init objhead */ memset(info, 0, sizeof(objhead)); /* init objhead */
else else
bzero(retval, sizeb); bzero(info, allocate_size);
((struct objhead *)retval)->size = (sizeb >> gcLOG_WORD_SIZE); info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumbed us up to the next word boundary */
return PTR(NUM(retval) + WORD_SIZE); {
void * objptr = OBJHEAD_TO_OBJPTR(info);
ASSERT_VALID_OBJPTR(objptr);
return objptr;
}
} }
} }
#define PAIR_SIZE_IN_BYTES ALIGN_BYTES_SIZE(gcWORDS_TO_BYTES(gcBYTES_TO_WORDS(sizeof(Scheme_Simple_Object))) + WORD_SIZE) #define PAIR_SIZE_IN_BYTES ALIGN_BYTES_SIZE(sizeof(Scheme_Simple_Object) + OBJHEAD_SIZE)
void *GC_malloc_pair(void *car, void *cdr) void *GC_malloc_pair(void *car, void *cdr)
{ {
unsigned long ptr, newptr; unsigned long newptr;
size_t sizeb; void *pair;
void *retval; const size_t allocate_size = PAIR_SIZE_IN_BYTES;
sizeb = PAIR_SIZE_IN_BYTES; newptr = GC_gen0_alloc_page_ptr + allocate_size;
ptr = GC_gen0_alloc_page_ptr; ASSERT_VALID_OBJPTR(newptr);
newptr = GC_gen0_alloc_page_ptr + sizeb;
if(OVERFLOWS_GEN0(newptr)) { if(OVERFLOWS_GEN0(newptr)) {
NewGC *gc = GC_get_GC(); NewGC *gc = GC_get_GC();
gc->park[0] = car; gc->park[0] = car;
gc->park[1] = cdr; gc->park[1] = cdr;
retval = GC_malloc_one_tagged(sizeb - WORD_SIZE); pair = GC_malloc_one_tagged(sizeof(Scheme_Simple_Object));
car = gc->park[0]; car = gc->park[0];
cdr = gc->park[1]; cdr = gc->park[1];
gc->park[0] = NULL; gc->park[0] = NULL;
gc->park[1] = NULL; gc->park[1] = NULL;
} else { }
struct objhead *info; else {
objhead *info = (objhead *) PTR(GC_gen0_alloc_page_ptr);
GC_gen0_alloc_page_ptr = newptr; GC_gen0_alloc_page_ptr = newptr;
retval = PTR(ptr); memset(info, 0, sizeof(objhead) + WORD_SIZE); /* init objhead */ /* init first word of SchemeObject to 0 */
info = (struct objhead *)retval;
((void **)retval)[0] = NULL; /* objhead */
((void **)retval)[1] = 0; /* tag word */
/* info->type = type; */ /* We know that the type field is already 0 */ /* info->type = type; */ /* We know that the type field is already 0 */
info->size = (sizeb >> gcLOG_WORD_SIZE); info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumbed us up to the next word boundary */
retval = PTR(NUM(retval) + WORD_SIZE); pair = OBJHEAD_TO_OBJPTR(info);
ASSERT_VALID_OBJPTR(pair);
} }
((short *)retval)[0] = scheme_pair_type;
((void **)retval)[1] = car;
((void **)retval)[2] = cdr;
return retval; /* initialize pair */
{
Scheme_Simple_Object *obj = (Scheme_Simple_Object *) pair;
obj->iso.so.type = scheme_pair_type;
obj->u.pair_val.car = car;
obj->u.pair_val.cdr = cdr;
}
return pair;
} }
/* the allocation mechanism we present to the outside world */ /* the allocation mechanism we present to the outside world */
@ -808,23 +834,33 @@ void GC_free(void *p) {}
long GC_compute_alloc_size(long sizeb) long GC_compute_alloc_size(long sizeb)
{ {
return ALIGN_BYTES_SIZE(gcWORDS_TO_BYTES(gcBYTES_TO_WORDS(sizeb)) + WORD_SIZE); return COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(sizeb);
} }
long GC_initial_word(int sizeb) long GC_initial_word(int request_size)
{ {
long w = 0; long w = 0;
struct objhead info; objhead info;
sizeb = ALIGN_BYTES_SIZE(gcWORDS_TO_BYTES(gcBYTES_TO_WORDS(sizeb)) + WORD_SIZE); const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(request_size);
memset(&info, 0, sizeof(struct objhead)); memset(&info, 0, sizeof(objhead));
info.size = (sizeb >> gcLOG_WORD_SIZE); info.size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumbed us up to the next word boundary */
memcpy(&w, &info, sizeof(struct objhead)); memcpy(&w, &info, sizeof(objhead));
return w; return w;
} }
void GC_initial_words(char *buffer, int sizeb)
{
objhead *info = (objhead *)buffer;
const size_t allocate_size = COMPUTE_ALLOC_SIZE_FOR_OBJECT_SIZE(sizeb);
memset(info, 0, sizeof(objhead));
info->size = BYTES_MULTIPLE_OF_WORD_TO_WORDS(allocate_size); /* ALIGN_BYTES_SIZE bumbed us up to the next word boundary */
}
long GC_alloc_alignment() long GC_alloc_alignment()
{ {
return APAGE_SIZE; return APAGE_SIZE;
@ -878,6 +914,7 @@ inline static void resize_gen0(NewGC *gc, unsigned long new_size)
/* we're going to allocate onto the first page now */ /* we're going to allocate onto the first page now */
gc->gen0.curr_alloc_page = gc->gen0.pages; gc->gen0.curr_alloc_page = gc->gen0.pages;
GC_gen0_alloc_page_ptr = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size; GC_gen0_alloc_page_ptr = NUM(gc->gen0.curr_alloc_page->addr) + gc->gen0.curr_alloc_page->size;
ASSERT_VALID_OBJPTR(GC_gen0_alloc_page_ptr);
GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE; GC_gen0_alloc_page_end = NUM(gc->gen0.curr_alloc_page->addr) + GEN0_PAGE_SIZE;
/* set the two size variables */ /* set the two size variables */
@ -914,7 +951,7 @@ inline static int marked(NewGC *gc, void *p)
if((NUM(page->addr) + page->previous_size) > NUM(p)) if((NUM(page->addr) + page->previous_size) > NUM(p))
return 1; return 1;
} }
return ((struct objhead *)(NUM(p) - WORD_SIZE))->mark; return OBJPTR_TO_OBJHEAD(p)->mark;
} }
/*****************************************************************************/ /*****************************************************************************/
@ -1069,9 +1106,9 @@ static void *get_backtrace(struct mpage *page, void *ptr)
if (page->size_class) { if (page->size_class) {
if (page->size_class > 1) if (page->size_class > 1)
ptr = PTR((char *)page->addr + PREFIX_SIZE + WORD_SIZE); ptr = BIG_PAGE_TO_OBJECT(page);
else else
ptr = (char *)MED_OBJHEAD(ptr, page->size) + WORD_SIZE; ptr = MED_OBJHEAD_TO_OBJECT(ptr, page->size);
} }
delta = PPTR(ptr) - PPTR(page->addr); delta = PPTR(ptr) - PPTR(page->addr);
@ -1783,27 +1820,27 @@ void GC_mark(const void *const_p)
} }
page->marked_on = 1; page->marked_on = 1;
record_backtrace(page, PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE)); record_backtrace(page, BIG_PAGE_TO_OBJECT(page));
GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page)); GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page));
/* Finally, we want to add this to our mark queue, so we can /* Finally, we want to add this to our mark queue, so we can
propagate its pointers */ propagate its pointers */
push_ptr(p); push_ptr(p);
} else { } else {
/* A medium page. */ /* A medium page. */
struct objhead *info = MED_OBJHEAD(p, page->size); objhead *info = MED_OBJHEAD(p, page->size);
if (info->mark) { if (info->mark) {
GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p)); GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
return; return;
} }
info->mark = 1; info->mark = 1;
page->marked_on = 1; page->marked_on = 1;
p = PTR(NUM(info) + WORD_SIZE); p = OBJHEAD_TO_OBJPTR(info);
backtrace_new_page_if_needed(gc, page); backtrace_new_page_if_needed(gc, page);
record_backtrace(page, p); record_backtrace(page, p);
push_ptr(p); push_ptr(p);
} }
} else { } else {
struct objhead *ohead = (struct objhead *)(NUM(p) - WORD_SIZE); objhead *ohead = OBJPTR_TO_OBJHEAD(p);
if(ohead->mark) { if(ohead->mark) {
GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p)); GCDEBUG((DEBUGOUTF,"Not marking %p (already marked)\n", p));
@ -1902,16 +1939,18 @@ void GC_mark(const void *const_p)
#ifdef NEWGC_BTC_ACCOUNT #ifdef NEWGC_BTC_ACCOUNT
BTC_set_btc_mark(gc, newplace); BTC_set_btc_mark(gc, newplace);
#endif #endif
/* drop the new location of the object into the forwarding space
and into the mark queue */ {
newplace = PTR(NUM(newplace) + WORD_SIZE); /* drop the new location of the object into the forwarding space
/* record why we marked this one (if enabled) */ and into the mark queue */
record_backtrace(work, newplace); void *newp = OBJHEAD_TO_OBJPTR(newplace);
/* set forwarding pointer */ /* record why we marked this one (if enabled) */
GCDEBUG((DEBUGOUTF,"Marking %p (moved to %p on page %p)\n", record_backtrace(work, newp);
p, newplace, work)); /* set forwarding pointer */
*(void**)p = newplace; GCDEBUG((DEBUGOUTF,"Marking %p (moved to %p on page %p)\n", p, newp, work));
push_ptr(newplace); *(void**)p = newp;
push_ptr(newp);
}
} }
} }
} }
@ -1932,7 +1971,7 @@ static void propagate_marks(NewGC *gc)
because we vet bad cases out in GC_mark, above */ because we vet bad cases out in GC_mark, above */
if(page->size_class) { if(page->size_class) {
if(page->size_class > 1) { if(page->size_class > 1) {
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
void **end = PPTR(NUM(page->addr) + page->size); void **end = PPTR(NUM(page->addr) + page->size);
set_backtrace_source(start, page->page_type); set_backtrace_source(start, page->page_type);
@ -1941,6 +1980,7 @@ static void propagate_marks(NewGC *gc)
case PAGE_TAGGED: case PAGE_TAGGED:
{ {
unsigned short tag = *(unsigned short*)start; unsigned short tag = *(unsigned short*)start;
ASSERT_TAG(tag);
if((unsigned long)mark_table[tag] < PAGE_TYPES) { if((unsigned long)mark_table[tag] < PAGE_TYPES) {
/* atomic */ /* atomic */
} else { } else {
@ -1954,6 +1994,7 @@ static void propagate_marks(NewGC *gc)
case PAGE_TARRAY: case PAGE_TARRAY:
{ {
unsigned short tag = *(unsigned short *)start; unsigned short tag = *(unsigned short *)start;
ASSERT_TAG(tag);
end -= INSET_WORDS; end -= INSET_WORDS;
while(start < end) { while(start < end) {
GC_ASSERT(mark_table[tag]); GC_ASSERT(mark_table[tag]);
@ -1964,7 +2005,7 @@ static void propagate_marks(NewGC *gc)
} }
} else { } else {
/* Medium page */ /* Medium page */
struct objhead *info = (struct objhead *)(NUM(p) - WORD_SIZE); objhead *info = OBJPTR_TO_OBJHEAD(p);
set_backtrace_source(p, info->type); set_backtrace_source(p, info->type);
@ -1972,6 +2013,7 @@ static void propagate_marks(NewGC *gc)
case PAGE_TAGGED: case PAGE_TAGGED:
{ {
unsigned short tag = *(unsigned short*)p; unsigned short tag = *(unsigned short*)p;
ASSERT_TAG(tag);
GC_ASSERT(mark_table[tag]); GC_ASSERT(mark_table[tag]);
mark_table[tag](p); mark_table[tag](p);
break; break;
@ -1986,7 +2028,7 @@ static void propagate_marks(NewGC *gc)
} }
} }
} else { } else {
struct objhead *info = (struct objhead *)(NUM(p) - WORD_SIZE); objhead *info = OBJPTR_TO_OBJHEAD(p);
set_backtrace_source(p, info->type); set_backtrace_source(p, info->type);
@ -1994,6 +2036,7 @@ static void propagate_marks(NewGC *gc)
case PAGE_TAGGED: case PAGE_TAGGED:
{ {
unsigned short tag = *(unsigned short*)p; unsigned short tag = *(unsigned short*)p;
ASSERT_TAG(tag);
GC_ASSERT(mark_table[tag]); GC_ASSERT(mark_table[tag]);
mark_table[tag](p); mark_table[tag](p);
break; break;
@ -2009,6 +2052,7 @@ static void propagate_marks(NewGC *gc)
void **start = p; void **start = p;
void **end = PPTR(info) + (info->size - INSET_WORDS); void **end = PPTR(info) + (info->size - INSET_WORDS);
unsigned short tag = *(unsigned short *)start; unsigned short tag = *(unsigned short *)start;
ASSERT_TAG(tag);
while(start < end) { while(start < end) {
GC_ASSERT(mark_table[tag]); GC_ASSERT(mark_table[tag]);
start += mark_table[tag](start); start += mark_table[tag](start);
@ -2024,14 +2068,14 @@ static void propagate_marks(NewGC *gc)
void *GC_resolve(void *p) void *GC_resolve(void *p)
{ {
NewGC *gc = GC_get_GC(); NewGC *gc = GC_get_GC();
struct mpage *page = pagemap_find_page(gc->page_maps, p); mpage *page = pagemap_find_page(gc->page_maps, p);
struct objhead *info; objhead *info;
if(!page || page->size_class) if(!page || page->size_class)
return p; return p;
info = (struct objhead *)(NUM(p) - WORD_SIZE); info = OBJPTR_TO_OBJHEAD(p);
if(info->mark && info->moved) if(info->mark && info->moved)
return *(void**)p; return *(void**)p;
else else
return p; return p;
@ -2045,7 +2089,7 @@ void *GC_fixup_self(void *p)
void GC_fixup(void *pp) void GC_fixup(void *pp)
{ {
NewGC *gc; NewGC *gc;
struct mpage *page; mpage *page;
void *p = *(void**)pp; void *p = *(void**)pp;
if(!p || (NUM(p) & 0x1)) if(!p || (NUM(p) & 0x1))
@ -2053,10 +2097,10 @@ void GC_fixup(void *pp)
gc = GC_get_GC(); gc = GC_get_GC();
if((page = pagemap_find_page(gc->page_maps, p))) { if((page = pagemap_find_page(gc->page_maps, p))) {
struct objhead *info; objhead *info;
if(page->size_class) return; if(page->size_class) return;
info = (struct objhead *)(NUM(p) - WORD_SIZE); info = OBJPTR_TO_OBJHEAD(p);
if(info->mark && info->moved) if(info->mark && info->moved)
*(void**)pp = *(void**)p; *(void**)pp = *(void**)p;
else GCDEBUG((DEBUGOUTF, "Not repairing %p from %p (not moved)\n",p,pp)); else GCDEBUG((DEBUGOUTF, "Not repairing %p from %p (not moved)\n",p,pp));
@ -2073,9 +2117,9 @@ void GC_fixup(void *pp)
static void *trace_pointer_start(struct mpage *page, void *p) { static void *trace_pointer_start(struct mpage *page, void *p) {
if (page->size_class) { if (page->size_class) {
if (page->size_class > 1) if (page->size_class > 1)
return PTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); return BIG_PAGE_TO_OBJECT(page);
else else
return PTR(NUM(MED_OBJHEAD(p, page->size)) + WORD_SIZE); return MED_OBJHEAD_TO_OBJECT(p, page->size);
} else } else
return p; return p;
} }
@ -2123,17 +2167,19 @@ void GC_dump_with_traces(int flags,
void **end = PPTR(NUM(page->addr) + page->size); void **end = PPTR(NUM(page->addr) + page->size);
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
if(!info->dead) { if(!info->dead) {
unsigned short tag = *(unsigned short *)(start + 1); void *obj_start = OBJHEAD_TO_OBJPTR(start);
unsigned short tag = *(unsigned short *)obj_start;
ASSERT_TAG(tag);
if (tag < MAX_DUMP_TAG) { if (tag < MAX_DUMP_TAG) {
counts[tag]++; counts[tag]++;
sizes[tag] += info->size; sizes[tag] += info->size;
} }
if (tag == trace_for_tag) { if (tag == trace_for_tag) {
register_traced_object(start + 1); register_traced_object(obj_start);
if (for_each_found) if (for_each_found)
for_each_found(start + 1); for_each_found(obj_start);
} }
} }
start += info->size; start += info->size;
@ -2142,16 +2188,18 @@ void GC_dump_with_traces(int flags,
for (page = gc->gen1_pages[PAGE_BIG]; page; page = page->next) { for (page = gc->gen1_pages[PAGE_BIG]; page; page = page->next) {
if (page->page_type == PAGE_TAGGED) { if (page->page_type == PAGE_TAGGED) {
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE); void **start = PPTR(NUM(page->addr) + PREFIX_SIZE);
unsigned short tag = *(unsigned short *)(start + 1); void *obj_start = OBJHEAD_TO_OBJPTR(start);
unsigned short tag = *(unsigned short *)obj_start;
ASSERT_TAG(tag);
if (tag < MAX_DUMP_TAG) { if (tag < MAX_DUMP_TAG) {
counts[tag]++; counts[tag]++;
sizes[tag] += gcBYTES_TO_WORDS(page->size); sizes[tag] += gcBYTES_TO_WORDS(page->size);
} }
if ((tag == trace_for_tag) if ((tag == trace_for_tag)
|| (tag == -trace_for_tag)) { || (tag == -trace_for_tag)) {
register_traced_object(start + 1); register_traced_object(obj_start);
if (for_each_found) if (for_each_found)
for_each_found(start + 1); for_each_found(obj_start);
} }
} }
} }
@ -2164,15 +2212,17 @@ void GC_dump_with_traces(int flags,
struct objhead *info = (struct objhead *)start; struct objhead *info = (struct objhead *)start;
if (!info->dead) { if (!info->dead) {
if (info->type == PAGE_TAGGED) { if (info->type == PAGE_TAGGED) {
unsigned short tag = *(unsigned short *)(start + 1); void *obj_start = OBJHEAD_TO_OBJPTR(start);
unsigned short tag = *(unsigned short *)obj_start;
ASSERT_TAG(tag);
if (tag < MAX_DUMP_TAG) { if (tag < MAX_DUMP_TAG) {
counts[tag]++; counts[tag]++;
sizes[tag] += info->size; sizes[tag] += info->size;
} }
if (tag == trace_for_tag) { if (tag == trace_for_tag) {
register_traced_object(start + 1); register_traced_object(obj_start);
if (for_each_found) if (for_each_found)
for_each_found(start + 1); for_each_found(obj_start);
} }
} }
} }
@ -2382,21 +2432,21 @@ static void mark_backpointers(NewGC *gc)
if(work->size_class) { if(work->size_class) {
/* must be a big page */ /* must be a big page */
work->size_class = 3; work->size_class = 3;
push_ptr(PPTR(NUM(work->addr) + PREFIX_SIZE + sizeof(struct objhead))); push_ptr(BIG_PAGE_TO_OBJECT(work));
} else { } else {
if(work->page_type != PAGE_ATOMIC) { if(work->page_type != PAGE_ATOMIC) {
void **start = PPTR(NUM(work->addr) + PREFIX_SIZE); void **start = PPTR(NUM(work->addr) + PREFIX_SIZE);
void **end = PPTR(NUM(work->addr) + work->size); void **end = PPTR(NUM(work->addr) + work->size);
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
if(!info->dead) { if(!info->dead) {
info->mark = 1; info->mark = 1;
/* This must be a push_ptr, and not a direct call to /* This must be a push_ptr, and not a direct call to
internal_mark. This is because we need every object internal_mark. This is because we need every object
in the older heap to be marked out of and noted as in the older heap to be marked out of and noted as
marked before we do anything else */ marked before we do anything else */
push_ptr(start + 1); push_ptr(OBJHEAD_TO_OBJPTR(start));
} }
start += info->size; start += info->size;
} }
@ -2421,11 +2471,11 @@ static void mark_backpointers(NewGC *gc)
pagemap_add(pagemap, work); pagemap_add(pagemap, work);
while(start <= end) { while(start <= end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
if(!info->dead) { if(!info->dead) {
info->mark = 1; info->mark = 1;
/* This must be a push_ptr (see above) */ /* This must be a push_ptr (see above) */
push_ptr(start + 1); push_ptr(OBJHEAD_TO_OBJPTR(info));
} }
start += info->size; start += info->size;
} }
@ -2467,7 +2517,7 @@ inline static void do_heap_compact(NewGC *gc)
PageMap pagemap = gc->page_maps; PageMap pagemap = gc->page_maps;
for(i = 0; i < PAGE_BIG; i++) { for(i = 0; i < PAGE_BIG; i++) {
struct mpage *work = gc->gen1_pages[i], *prev, *npage; mpage *work = gc->gen1_pages[i], *prev, *npage;
/* Start from the end: */ /* Start from the end: */
if (work) { if (work) {
@ -2496,9 +2546,7 @@ inline static void do_heap_compact(NewGC *gc)
newplace = PPTR(NUM(npage->addr) + npage->size); newplace = PPTR(NUM(npage->addr) + npage->size);
while(start < end) { while(start < end) {
struct objhead *info; objhead *info = (objhead *)start;
info = (struct objhead *)start;
if(info->mark) { if(info->mark) {
while (avail <= info->size) { while (avail <= info->size) {
@ -2521,7 +2569,7 @@ inline static void do_heap_compact(NewGC *gc)
gcWORDS_TO_BYTES(info->size), start+1, newplace+1)); gcWORDS_TO_BYTES(info->size), start+1, newplace+1));
memcpy(newplace, start, gcWORDS_TO_BYTES(info->size)); memcpy(newplace, start, gcWORDS_TO_BYTES(info->size));
info->moved = 1; info->moved = 1;
*(PPTR(NUM(start) + WORD_SIZE)) = PTR(NUM(newplace) + WORD_SIZE); *(PPTR(OBJHEAD_TO_OBJPTR(start))) = OBJHEAD_TO_OBJPTR(newplace);
copy_backtrace_source(npage, newplace, work, start); copy_backtrace_source(npage, newplace, work, start);
newplace += info->size; newplace += info->size;
avail -= info->size; avail -= info->size;
@ -2557,7 +2605,7 @@ inline static void do_heap_compact(NewGC *gc)
static void repair_heap(NewGC *gc) static void repair_heap(NewGC *gc)
{ {
struct mpage *page; mpage *page;
int i; int i;
Fixup_Proc *fixup_table = gc->fixup_table; Fixup_Proc *fixup_table = gc->fixup_table;
@ -2568,7 +2616,7 @@ static void repair_heap(NewGC *gc)
/* these are guaranteed not to be protected */ /* these are guaranteed not to be protected */
if(page->size_class) { if(page->size_class) {
/* since we get here via gen1_pages, it's a big page */ /* since we get here via gen1_pages, it's a big page */
void **start = PPTR(NUM(page->addr) + PREFIX_SIZE + WORD_SIZE); void **start = PPTR(BIG_PAGE_TO_OBJECT(page));
void **end = PPTR(NUM(page->addr) + page->size); void **end = PPTR(NUM(page->addr) + page->size);
GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n", GCDEBUG((DEBUGOUTF, "Cleaning objs on page %p, starting with %p\n",
@ -2587,6 +2635,7 @@ static void repair_heap(NewGC *gc)
break; break;
case PAGE_TARRAY: { case PAGE_TARRAY: {
unsigned short tag = *(unsigned short *)start; unsigned short tag = *(unsigned short *)start;
ASSERT_TAG(tag);
end -= INSET_WORDS; end -= INSET_WORDS;
while(start < end) start += fixup_table[tag](start); while(start < end) start += fixup_table[tag](start);
break; break;
@ -2601,11 +2650,14 @@ static void repair_heap(NewGC *gc)
switch(page->page_type) { switch(page->page_type) {
case PAGE_TAGGED: case PAGE_TAGGED:
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
if(info->mark) { if(info->mark) {
void *obj_start = OBJHEAD_TO_OBJPTR(start);
unsigned short tag = *(unsigned short *)obj_start;
ASSERT_TAG(tag);
info->mark = 0; info->mark = 0;
fixup_table[*(unsigned short*)(start+1)](start+1); fixup_table[tag](obj_start);
} else { } else {
info->dead = 1; info->dead = 1;
} }
@ -2614,7 +2666,7 @@ static void repair_heap(NewGC *gc)
break; break;
case PAGE_ATOMIC: case PAGE_ATOMIC:
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
if(info->mark) { if(info->mark) {
info->mark = 0; info->mark = 0;
} else info->dead = 1; } else info->dead = 1;
@ -2623,10 +2675,11 @@ static void repair_heap(NewGC *gc)
break; break;
case PAGE_ARRAY: case PAGE_ARRAY:
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
size_t size = info->size; size_t size = info->size;
if(info->mark) { if(info->mark) {
void **tempend = (start++) + size; void **tempend = PPTR(info) + info->size;
start = OBJHEAD_TO_OBJPTR(start);
while(start < tempend) gcFIXUP(*start++); while(start < tempend) gcFIXUP(*start++);
info->mark = 0; info->mark = 0;
} else { } else {
@ -2637,11 +2690,14 @@ static void repair_heap(NewGC *gc)
break; break;
case PAGE_TARRAY: case PAGE_TARRAY:
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
size_t size = info->size; size_t size = info->size;
if(info->mark) { if(info->mark) {
void **tempend = (start++) + (size - INSET_WORDS); void **tempend = PPTR(info) + (info->size - INSET_WORDS);
unsigned short tag = *(unsigned short*)start; unsigned short tag;
start = OBJHEAD_TO_OBJPTR(start);
tag = *(unsigned short*)start;
ASSERT_TAG(tag);
while(start < tempend) while(start < tempend)
start += fixup_table[tag](start); start += fixup_table[tag](start);
info->mark = 0; info->mark = 0;
@ -2654,9 +2710,9 @@ static void repair_heap(NewGC *gc)
break; break;
case PAGE_XTAGGED: case PAGE_XTAGGED:
while(start < end) { while(start < end) {
struct objhead *info = (struct objhead *)start; objhead *info = (objhead *)start;
if(info->mark) { if(info->mark) {
GC_fixup_xtagged(start + 1); GC_fixup_xtagged(OBJHEAD_TO_OBJPTR(start));
info->mark = 0; info->mark = 0;
} else info->dead = 1; } else info->dead = 1;
start += info->size; start += info->size;
@ -2679,13 +2735,17 @@ static void repair_heap(NewGC *gc)
switch(info->type) { switch(info->type) {
case PAGE_ARRAY: case PAGE_ARRAY:
{ {
void **tempend = (start++) + info->size; void **tempend = PPTR(info) + info->size;
start = OBJHEAD_TO_OBJPTR(start);
while(start < tempend) gcFIXUP(*start++); while(start < tempend) gcFIXUP(*start++);
} }
break; break;
case PAGE_TAGGED: case PAGE_TAGGED:
{ {
fixup_table[*(unsigned short*)(start+1)](start+1); void *obj_start = OBJHEAD_TO_OBJPTR(start);
unsigned short tag = *(unsigned short *)obj_start;
ASSERT_TAG(tag);
fixup_table[tag](obj_start);
start += info->size; start += info->size;
} }
break; break;

View File

@ -1185,6 +1185,7 @@ static void _jit_prolog_again(mz_jit_state *jitter, int n, int ret_addr_reg)
#ifdef CAN_INLINE_ALLOC #ifdef CAN_INLINE_ALLOC
extern THREAD_LOCAL unsigned long GC_gen0_alloc_page_ptr; extern THREAD_LOCAL unsigned long GC_gen0_alloc_page_ptr;
long GC_initial_word(int sizeb); long GC_initial_word(int sizeb);
void GC_initial_words(char *buffer, int sizeb);
long GC_compute_alloc_size(long sizeb); long GC_compute_alloc_size(long sizeb);
long GC_alloc_alignment(void); long GC_alloc_alignment(void);
@ -1248,6 +1249,7 @@ static int inline_alloc(mz_jit_state *jitter, int amt, Scheme_Type ty, int immut
{ {
GC_CAN_IGNORE jit_insn *ref, *reffail; GC_CAN_IGNORE jit_insn *ref, *reffail;
long a_word, sz, algn; long a_word, sz, algn;
long a_words[2];
sz = GC_compute_alloc_size(amt); sz = GC_compute_alloc_size(amt);
algn = GC_alloc_alignment(); algn = GC_alloc_alignment();
@ -1282,12 +1284,29 @@ static int inline_alloc(mz_jit_state *jitter, int amt, Scheme_Type ty, int immut
mz_patch_branch(ref); mz_patch_branch(ref);
jit_addi_ul(JIT_R2, JIT_V1, sz); jit_addi_ul(JIT_R2, JIT_V1, sz);
(void)jit_sti_l(&GC_gen0_alloc_page_ptr, JIT_R2); (void)jit_sti_l(&GC_gen0_alloc_page_ptr, JIT_R2);
#if !defined(MZ_USE_PLACES)
a_word = GC_initial_word(amt); a_word = GC_initial_word(amt);
jit_movi_l(JIT_R2, a_word); jit_movi_l(JIT_R2, a_word);
jit_str_l(JIT_V1, JIT_R2); jit_str_l(JIT_V1, JIT_R2);
/*SchemeObject header*/
a_word = initial_tag_word(ty, immut); a_word = initial_tag_word(ty, immut);
jit_movi_l(JIT_R2, a_word); jit_movi_l(JIT_R2, a_word);
jit_stxi_l(sizeof(long), JIT_V1, JIT_R2); jit_stxi_l(sizeof(long), JIT_V1, JIT_R2);
#else
GC_initial_words(a_words, amt);
jit_movi_l(JIT_R2, a_words[0]);
jit_str_l(JIT_V1, JIT_R2);
jit_movi_l(JIT_R2, a_words[1]);
jit_stxi_l(sizeof(long), JIT_V1, JIT_R2);
/*SchemeObject header*/
a_word = initial_tag_word(ty, immut);
jit_movi_l(JIT_R2, a_word);
jit_stxi_l(sizeof(long)*2, JIT_V1, JIT_R2);
#endif
CHECK_LIMIT(); CHECK_LIMIT();
__END_TINY_JUMPS__(1); __END_TINY_JUMPS__(1);
@ -3200,7 +3219,7 @@ static int generate_double_arith(mz_jit_state *jitter, int arith, int cmp, int r
# ifdef CAN_INLINE_ALLOC # ifdef CAN_INLINE_ALLOC
inline_alloc(jitter, sizeof(Scheme_Double), scheme_double_type, 0, 0, 1, 0); inline_alloc(jitter, sizeof(Scheme_Double), scheme_double_type, 0, 0, 1, 0);
CHECK_LIMIT(); CHECK_LIMIT();
jit_addi_p(JIT_R0, JIT_V1, sizeof(long)); jit_addi_p(JIT_R0, JIT_V1, GC_OBJHEAD_SIZE);
(void)jit_stxi_d_fppop(&((Scheme_Double *)0x0)->double_val, JIT_R0, JIT_FPR1); (void)jit_stxi_d_fppop(&((Scheme_Double *)0x0)->double_val, JIT_R0, JIT_FPR1);
# else # else
(void)jit_sti_d_fppop(&double_result, JIT_FPR1); (void)jit_sti_d_fppop(&double_result, JIT_FPR1);
@ -4372,8 +4391,8 @@ static int generate_inlined_unary(mz_jit_state *jitter, Scheme_App2_Rec *app, in
inline_alloc(jitter, sizeof(Scheme_Small_Object), scheme_box_type, 0, 1, 0, 0); inline_alloc(jitter, sizeof(Scheme_Small_Object), scheme_box_type, 0, 1, 0, 0);
CHECK_LIMIT(); CHECK_LIMIT();
jit_stxi_p((long)&SCHEME_BOX_VAL(0x0) + sizeof(long), JIT_V1, JIT_R0); jit_stxi_p((long)&SCHEME_BOX_VAL(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R0);
jit_addi_p(JIT_R0, JIT_V1, sizeof(long)); jit_addi_p(JIT_R0, JIT_V1, GC_OBJHEAD_SIZE);
#else #else
/* Non-inlined */ /* Non-inlined */
JIT_UPDATE_THREAD_RSPTR_IF_NEEDED(); JIT_UPDATE_THREAD_RSPTR_IF_NEEDED();
@ -4893,9 +4912,9 @@ static int generate_inlined_binary(mz_jit_state *jitter, Scheme_App3_Rec *app, i
inline_alloc(jitter, sizeof(Scheme_Simple_Object), scheme_mutable_pair_type, 0, 1, 0, 0); inline_alloc(jitter, sizeof(Scheme_Simple_Object), scheme_mutable_pair_type, 0, 1, 0, 0);
CHECK_LIMIT(); CHECK_LIMIT();
jit_stxi_p((long)&SCHEME_MCAR(0x0) + sizeof(long), JIT_V1, JIT_R0); jit_stxi_p((long)&SCHEME_MCAR(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R0);
jit_stxi_p((long)&SCHEME_MCDR(0x0) + sizeof(long), JIT_V1, JIT_R1); jit_stxi_p((long)&SCHEME_MCDR(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R1);
jit_addi_p(JIT_R0, JIT_V1, sizeof(long)); jit_addi_p(JIT_R0, JIT_V1, GC_OBJHEAD_SIZE);
#else #else
/* Non-inlined alloc */ /* Non-inlined alloc */
JIT_UPDATE_THREAD_RSPTR_IF_NEEDED(); JIT_UPDATE_THREAD_RSPTR_IF_NEEDED();
@ -5134,13 +5153,13 @@ static int generate_cons_alloc(mz_jit_state *jitter, int rev, int inline_retry)
CHECK_LIMIT(); CHECK_LIMIT();
if (rev) { if (rev) {
jit_stxi_p((long)&SCHEME_CAR(0x0) + sizeof(long), JIT_V1, JIT_R1); jit_stxi_p((long)&SCHEME_CAR(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R1);
jit_stxi_p((long)&SCHEME_CDR(0x0) + sizeof(long), JIT_V1, JIT_R0); jit_stxi_p((long)&SCHEME_CDR(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R0);
} else { } else {
jit_stxi_p((long)&SCHEME_CAR(0x0) + sizeof(long), JIT_V1, JIT_R0); jit_stxi_p((long)&SCHEME_CAR(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R0);
jit_stxi_p((long)&SCHEME_CDR(0x0) + sizeof(long), JIT_V1, JIT_R1); jit_stxi_p((long)&SCHEME_CDR(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R1);
} }
jit_addi_p(JIT_R0, JIT_V1, sizeof(long)); jit_addi_p(JIT_R0, JIT_V1, GC_OBJHEAD_SIZE);
#else #else
/* Non-inlined */ /* Non-inlined */
JIT_UPDATE_THREAD_RSPTR_IF_NEEDED(); JIT_UPDATE_THREAD_RSPTR_IF_NEEDED();
@ -5194,14 +5213,14 @@ static int generate_vector_alloc(mz_jit_state *jitter, Scheme_Object *rator,
CHECK_LIMIT(); CHECK_LIMIT();
if ((c == 2) || (c == 1)) { if ((c == 2) || (c == 1)) {
jit_stxi_p((long)&SCHEME_VEC_ELS(0x0)[0] + sizeof(long), JIT_V1, JIT_R0); jit_stxi_p((long)&SCHEME_VEC_ELS(0x0)[0] + GC_OBJHEAD_SIZE, JIT_V1, JIT_R0);
} }
if (c == 2) { if (c == 2) {
jit_stxi_p((long)&SCHEME_VEC_ELS(0x0)[1] + sizeof(long), JIT_V1, JIT_R1); jit_stxi_p((long)&SCHEME_VEC_ELS(0x0)[1] + GC_OBJHEAD_SIZE, JIT_V1, JIT_R1);
} }
jit_movi_l(JIT_R1, c); jit_movi_l(JIT_R1, c);
jit_stxi_i((long)&SCHEME_VEC_SIZE(0x0) + sizeof(long), JIT_V1, JIT_R1); jit_stxi_i((long)&SCHEME_VEC_SIZE(0x0) + GC_OBJHEAD_SIZE, JIT_V1, JIT_R1);
jit_addi_p(JIT_R0, JIT_V1, sizeof(long)); jit_addi_p(JIT_R0, JIT_V1, GC_OBJHEAD_SIZE);
#else #else
/* Non-inlined */ /* Non-inlined */
JIT_UPDATE_THREAD_RSPTR_IF_NEEDED(); JIT_UPDATE_THREAD_RSPTR_IF_NEEDED();
@ -5304,7 +5323,7 @@ static int generate_closure(Scheme_Closure_Data *data,
/* Inlined alloc */ /* Inlined alloc */
inline_alloc(jitter, sz, scheme_native_closure_type, 0, 0, 0, 0); inline_alloc(jitter, sz, scheme_native_closure_type, 0, 0, 0, 0);
CHECK_LIMIT(); CHECK_LIMIT();
jit_addi_p(JIT_R0, JIT_V1, sizeof(long)); jit_addi_p(JIT_R0, JIT_V1, GC_OBJHEAD_SIZE);
} else } else
# endif # endif
{ {

View File

@ -220,7 +220,7 @@ static int missed_context_switch = 0;
static int have_activity = 0; static int have_activity = 0;
int scheme_active_but_sleeping = 0; int scheme_active_but_sleeping = 0;
static int thread_ended_with_activity; static int thread_ended_with_activity;
int scheme_no_stack_overflow; THREAD_LOCAL int scheme_no_stack_overflow;
static int needs_sleep_cancelled; static int needs_sleep_cancelled;