fix problems with break exceptions and thread termination during constant-folding, make GMP temporary memory accountable by GC, and limit folding of 'expt' expressions
svn: r14185
This commit is contained in:
parent edcec6820e
commit 88222f7df4
@@ -590,7 +590,7 @@ inline static void BTC_run_account_hooks(NewGC *gc)
     if( ((work->type == MZACCT_REQUIRE) &&
          ((gc->used_pages > (gc->max_pages_for_use / 2))
           || ((((gc->max_pages_for_use / 2) - gc->used_pages) * APAGE_SIZE)
               < (work->amount + custodian_super_require(gc, work->c1)))))
         ||
         ((work->type == MZACCT_LIMIT) &&
          (GC_get_memory_use(work->c1) > work->amount))) {
@@ -1079,6 +1079,7 @@ typedef struct Scheme_Thread {

   /* save thread-specific GMP state: */
   long gmp_tls[6];
+  void *gmp_tls_data;

   struct Scheme_Thread_Custodian_Hop *mr_hop;
   Scheme_Custodian_Reference *mref;
@@ -887,6 +887,20 @@ Scheme_Object *do_big_power(const Scheme_Object *a, const Scheme_Object *b)
 Scheme_Object *scheme_generic_integer_power(const Scheme_Object *a, const Scheme_Object *b)
 {
   unsigned long exponent;

+  if (scheme_current_thread->constant_folding) {
+    /* if we're trying to fold a constant, limit the work that
+       we're willing to do at compile time */
+    if (SCHEME_BIGNUMP(b)
+        || (SCHEME_INT_VAL(b) > 10000))
+      scheme_signal_error("too big");
+    else if (SCHEME_BIGNUMP(a)) {
+      int len = SCHEME_BIGLEN(a);
+      if ((len > 10000)
+          || (len * SCHEME_INT_VAL(b)) > 10000)
+        scheme_signal_error("too big");
+    }
+  }
+
   if (scheme_get_unsigned_int_val((Scheme_Object *)b, &exponent))
     return do_power(a, exponent);
   else
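The guard above works from a rough size estimate: a bignum `a` of `len` limbs raised to the power `b` needs on the order of `len * b` limbs, so bounding that product bounds both the time and the memory an attempted fold can consume. A minimal standalone sketch of the same estimate (the `expt_fold_ok` helper, `FOLD_LIMIT` name, and `main` driver are illustrative, not part of the commit):

#include <stdio.h>

#define FOLD_LIMIT 10000  /* same threshold as the guard above */

/* Returns 1 if folding looks cheap enough to attempt at compile time. */
static int expt_fold_ok(long base_limbs, long exponent)
{
  if (exponent > FOLD_LIMIT)
    return 0;
  if (base_limbs > FOLD_LIMIT)
    return 0;
  /* a^b takes roughly base_limbs * exponent limbs, so bound the product */
  if (base_limbs * exponent > FOLD_LIMIT)
    return 0;
  return 1;
}

int main(void)
{
  printf("%d\n", expt_fold_ok(1, 9999));   /* 1: small base, modest exponent */
  printf("%d\n", expt_fold_ok(50, 1000));  /* 0: result would be ~50000 limbs */
  return 0;
}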
@@ -660,6 +660,11 @@ call_error(char *buffer, int len, Scheme_Object *exn)
                    "optimizer constant-fold attempt failed%s: %s",
                    scheme_optimize_context_to_string(scheme_current_thread->constant_folding),
                    buffer);
+      if (SCHEME_STRUCTP(exn)
+          && scheme_is_struct_instance(exn_table[MZEXN_BREAK].type, exn)) {
+        /* remember to re-raise exception */
+        scheme_current_thread->reading_delayed = exn;
+      }
       scheme_longjmp(scheme_error_buf, 1);
     } else if (scheme_current_thread->reading_delayed) {
       scheme_current_thread->reading_delayed = exn;
@@ -3257,6 +3262,11 @@ do_raise(Scheme_Object *arg, int need_debug, int eb)
                        scheme_optimize_context_to_string(p->constant_folding),
                        msg);
       }
+      if (SCHEME_STRUCTP(arg)
+          && scheme_is_struct_instance(exn_table[MZEXN_BREAK].type, arg)) {
+        /* remember to re-raise exception */
+        scheme_current_thread->reading_delayed = arg;
+      }
       scheme_longjmp (scheme_error_buf, 1);
     }

@@ -1012,19 +1012,30 @@ static Scheme_Object *try_apply(Scheme_Object *f, Scheme_Object *args, Scheme_Ob
      folding attempts */
 {
   Scheme_Object * volatile result;
+  Scheme_Object * volatile exn = NULL;
   mz_jmp_buf *savebuf, newbuf;

+  scheme_current_thread->reading_delayed = NULL;
   scheme_current_thread->constant_folding = (context ? context : scheme_true);
   savebuf = scheme_current_thread->error_buf;
   scheme_current_thread->error_buf = &newbuf;

-  if (scheme_setjmp(newbuf))
+  if (scheme_setjmp(newbuf)) {
     result = NULL;
-  else
+    exn = scheme_current_thread->reading_delayed;
+  } else
     result = _scheme_apply_to_list(f, args);

   scheme_current_thread->error_buf = savebuf;
   scheme_current_thread->constant_folding = NULL;
+  scheme_current_thread->reading_delayed = NULL;

+  if (scheme_current_thread->cjs.is_kill) {
+    scheme_longjmp(*scheme_current_thread->error_buf, 1);
+  }
+
+  if (exn)
+    scheme_raise(exn);
+
   return result;
 }
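The key detail in the revised try_apply is the ordering: the temporary error buffer is restored before the remembered break exception is re-raised, so the break propagates under the caller's handlers instead of bouncing off the folding trap again. A minimal, self-contained sketch of that pattern using plain setjmp/longjmp (the names error_buf, pending_break, and risky_work are stand-ins, not the runtime's):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf *error_buf;   /* stands in for the thread's error_buf */
static int pending_break;    /* stands in for reading_delayed */

static void risky_work(void)
{
  pending_break = 1;         /* pretend a break was delivered */
  longjmp(*error_buf, 1);    /* abort, as scheme_longjmp would */
}

int main(void)
{
  jmp_buf newbuf, *savebuf = error_buf;
  int ok;

  error_buf = &newbuf;
  if (setjmp(newbuf)) {
    ok = 0;                  /* the attempt failed */
  } else {
    risky_work();
    ok = 1;
  }
  error_buf = savebuf;       /* restore before re-raising */

  if (pending_break)
    printf("re-raise the break under the caller's handler\n");
  printf("ok = %d\n", ok);
  return 0;
}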
@@ -21,13 +21,11 @@ MA 02111-1307, USA. */
 #define _FORCE_INLINES
 #define _EXTERN_INLINE /* empty */

-/* We use malloc for now; this will have to change. */
-extern void *malloc(unsigned long);
-extern void free(void *);
-#define MALLOC malloc
-#define FREE(p, s) free(p)
+/* The allocation function should not create collectable
+   memory, though it can safely GC when allocating. */
+extern void *scheme_malloc_gmp(unsigned long, void **mem_pool);
+extern void scheme_free_gmp(void *, void **mem_pool);
+static void *mem_pool = 0;
+#define MALLOC(amt) scheme_malloc_gmp(amt, &mem_pool)
+#define FREE(p, s) scheme_free_gmp(p, &mem_pool)

 #include "../../sconfig.h"
 #include "mzconfig.h"
@@ -5796,18 +5794,21 @@ void scheme_gmp_tls_init(long *s)
   ((tmp_marker *)(s + 3))->alloc_point = &xxx;
 }

-void scheme_gmp_tls_load(long *s)
+void *scheme_gmp_tls_load(long *s)
 {
   s[0] = (long)current_total_allocation;
   s[1] = (long)max_total_allocation;
   s[2] = (long)current;
+  return mem_pool;
 }

-void scheme_gmp_tls_unload(long *s)
+void scheme_gmp_tls_unload(long *s, void *data)
 {
   current_total_allocation = (unsigned long)s[0];
   max_total_allocation = (unsigned long)s[1];
   current = (tmp_stack *)s[2];
   s[0] = 0;
+  mem_pool = data;
 }

 void scheme_gmp_tls_snapshot(long *s, long *save)
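The reason the pool is handed back and forth is reachability: while a thread runs, the pool lives in gmp.c's file-static mem_pool, which the collector cannot see, so at every swap-out it must be parked on the GC-marked thread record and taken back at swap-in. A small sketch of that handoff (the struct layout and helper names are illustrative, not the runtime's):

#include <stddef.h>

struct thread {
  long gmp_tls[6];     /* saved GMP globals */
  void *gmp_tls_data;  /* saved allocation pool, visible to the GC */
};

static void *mem_pool;  /* stands in for gmp.c's static mem_pool */

static void swap_out(struct thread *t)
{
  t->gmp_tls_data = mem_pool;  /* park the pool where the GC can mark it */
  mem_pool = NULL;
}

static void swap_in(struct thread *t)
{
  mem_pool = t->gmp_tls_data;  /* hand the pool back to the library */
  t->gmp_tls_data = NULL;
}

int main(void)
{
  struct thread t = { {0}, NULL };
  mem_pool = &t;               /* pretend something was allocated */
  swap_out(&t);
  swap_in(&t);
  return (mem_pool == &t) ? 0 : 1;
}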
@@ -5817,14 +5818,16 @@ void scheme_gmp_tls_snapshot(long *s, long *save)
   __gmp_tmp_mark((tmp_marker *)(s + 3));
 }

-void scheme_gmp_tls_restore_snapshot(long *s, long *save, int do_free)
+void scheme_gmp_tls_restore_snapshot(long *s, void *data, long *save, int do_free)
 {
   long other[6];
+  void *other_data;

   if (do_free == 2) {
-    scheme_gmp_tls_load(other);
-    scheme_gmp_tls_unload(s);
-  }
+    other_data = scheme_gmp_tls_load(other);
+    scheme_gmp_tls_unload(s, data);
+  } else
+    other_data = NULL;

   if (do_free)
     __gmp_tmp_free((tmp_marker *)(s + 3));
@@ -5832,11 +5835,12 @@ void scheme_gmp_tls_restore_snapshot(long *s, long *save, int do_free)
   if (save) {
     s[3] = save[0];
     s[4] = save[1];
   }

   if (do_free == 2) {
-    scheme_gmp_tls_load(s);
-    scheme_gmp_tls_unload(other);
+    data = scheme_gmp_tls_load(s);
+    scheme_gmp_tls_unload(other, other_data);
   }
 }
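When do_free is 2, the snapshot being restored belongs to some other, terminating thread, so the current thread briefly installs that thread's saved GMP state, lets the free happen, and then reinstalls its own. A minimal sketch of that shuffle with a single integer standing in for the whole GMP state (all names illustrative):

#include <stdio.h>

static int global_state;                /* stands in for GMP's globals */

static int state_load(void)     { return global_state; }
static void state_unload(int s) { global_state = s; }

/* Free another thread's temporaries by briefly adopting its saved state. */
static void cleanup_other(int *other_saved)
{
  int mine = state_load();      /* stash the running thread's state */
  state_unload(*other_saved);   /* install the dying thread's state */
  printf("freeing temporaries of state %d\n", global_state);
  *other_saved = state_load();  /* save the (now cleaned) state back */
  state_unload(mine);           /* and restore our own */
}

int main(void)
{
  int other = 42;
  global_state = 7;
  cleanup_other(&other);
  printf("our state is back: %d\n", global_state);
  return 0;
}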
@@ -690,6 +690,7 @@ thread_val {
     gcMARK(pr->private_kill_next);

     gcMARK(pr->user_tls);
+    gcMARK(pr->gmp_tls_data);

     gcMARK(pr->mr_hop);
     gcMARK(pr->mref);
@@ -5457,7 +5457,7 @@ Scheme_Object *scheme_load_delayed_code(int _which, Scheme_Load_Delay *_delay_in

     return v;
   } else {
-    if (v_exn)
+    if (v_exn && !scheme_current_thread->cjs.is_kill)
       scheme_raise(v_exn);
     scheme_longjmp(*scheme_current_thread->error_buf, 1);
     return NULL;
@@ -114,10 +114,10 @@ static int swapping = 0;
 #endif

 extern void scheme_gmp_tls_init(long *s);
-extern void scheme_gmp_tls_load(long *s);
-extern void scheme_gmp_tls_unload(long *s);
+extern void *scheme_gmp_tls_load(long *s);
+extern void scheme_gmp_tls_unload(long *s, void *p);
 extern void scheme_gmp_tls_snapshot(long *s, long *save);
-extern void scheme_gmp_tls_restore_snapshot(long *s, long *save, int do_free);
+extern void scheme_gmp_tls_restore_snapshot(long *s, void *data, long *save, int do_free);

 static void check_ready_break();
@@ -2511,7 +2511,9 @@ static void do_swap_thread()
 #if WATCH_FOR_NESTED_SWAPS
     swapping = 0;
 #endif
-    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls);
+    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
+    scheme_current_thread->gmp_tls_data = NULL;
+
     {
       Scheme_Object *l, *o;
       Scheme_Closure_Func f;
@@ -2558,7 +2560,11 @@ static void do_swap_thread()
       cb = can_break_param(scheme_current_thread);
       scheme_current_thread->can_break_at_swap = cb;
     }
-    scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
+    {
+      GC_CAN_IGNORE void *data;
+      data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
+      scheme_current_thread->gmp_tls_data = data;
+    }
 #ifdef RUNSTACK_IS_GLOBAL
     scheme_current_thread->runstack = MZ_RUNSTACK;
     scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
@@ -2782,7 +2788,8 @@ static void remove_thread(Scheme_Thread *r)
   thread_is_dead(r);

   /* In case we kill a thread while in a bignum operation: */
-  scheme_gmp_tls_restore_snapshot(r->gmp_tls, NULL, ((r == scheme_current_thread) ? 1 : 2));
+  scheme_gmp_tls_restore_snapshot(r->gmp_tls, r->gmp_tls_data,
+                                  NULL, ((r == scheme_current_thread) ? 1 : 2));

   if (r == scheme_current_thread) {
     /* We're going to be swapped out immediately. */
@@ -2825,7 +2832,8 @@ static void start_child(Scheme_Thread * volatile child,
     MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
     MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
 #endif
-    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls);
+    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
+    scheme_current_thread->gmp_tls_data = NULL;
     {
       Scheme_Object *l, *o;
       Scheme_Closure_Func f;
@@ -3745,7 +3753,7 @@ static Scheme_Object *raise_user_break(int argc, Scheme_Object ** volatile argv)
     int cont;
     cont = SAME_OBJ((Scheme_Object *)scheme_jumping_to_continuation,
                     argv[0]);
-    scheme_gmp_tls_restore_snapshot(scheme_current_thread->gmp_tls, save, !cont);
+    scheme_gmp_tls_restore_snapshot(scheme_current_thread->gmp_tls, NULL, save, !cont);
     scheme_longjmp(*savebuf, 1);
   }

@@ -7325,6 +7333,12 @@ static void get_ready_for_GC()
   scheme_block_child_signals(1);
 #endif

+  {
+    GC_CAN_IGNORE void *data;
+    data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
+    scheme_current_thread->gmp_tls_data = data;
+  }
+
   did_gc_count++;
 }
@@ -7332,6 +7346,9 @@ extern int GC_words_allocd;

 static void done_with_GC()
 {
+  scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
+  scheme_current_thread->gmp_tls_data = NULL;
+
 #ifdef RUNSTACK_IS_GLOBAL
 # ifdef MZ_PRECISE_GC
   if (scheme_current_thread->running) {
@@ -7505,6 +7522,45 @@ static Scheme_Object *current_stats(int argc, Scheme_Object *argv[])
   return scheme_void;
 }

+/*========================================================================*/
+/*                            gmp allocation                              */
+/*========================================================================*/
+
+/* Allocate atomic, immobile memory for GMP. Although we have set up
+   GMP to reliably free anything that it allocates, we allocate via
+   the GC to get accounting with 3m. The set of allocated blocks is
+   stored in a "mem_pool" variable, which is a linked list; GMP
+   allocates with a stack discipline, so maintaining the list is easy.
+   Meanwhile, scheme_gmp_tls_unload, etc., attach the pool to the
+   owning thread as needed for GC. */
+
+void *scheme_malloc_gmp(unsigned long amt, void **mem_pool)
+{
+  void *p, *mp;
+
+#ifdef MZ_PRECISE_GC
+  if (amt < GC_malloc_stays_put_threshold())
+    amt = GC_malloc_stays_put_threshold();
+#endif
+
+  p = scheme_malloc_atomic(amt);
+
+  mp = scheme_make_raw_pair(p, *mem_pool);
+  *mem_pool = mp;
+
+  return p;
+}
+
+void scheme_free_gmp(void *p, void **mem_pool)
+{
+  if (p != SCHEME_CAR(*mem_pool))
+    scheme_log(NULL,
+               SCHEME_LOG_FATAL,
+               0,
+               "bad GMP memory free");
+  *mem_pool = SCHEME_CDR(*mem_pool);
+}
+
 /*========================================================================*/
 /*                               precise GC                               */
 /*========================================================================*/
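Because GMP's temporary allocator works strictly LIFO, the pool can be a simple cons list whose head is always the most recently allocated block, so scheme_free_gmp only ever compares against the head. A standalone sketch of the same discipline using plain malloc/free in place of scheme_malloc_atomic and raw pairs (pool_malloc and pool_free are illustrative names, not the runtime's):

#include <assert.h>
#include <stdlib.h>

typedef struct pair { void *car; struct pair *cdr; } pair;

static void *pool_malloc(size_t amt, pair **mem_pool)
{
  void *p = malloc(amt);
  pair *mp = malloc(sizeof(pair));
  mp->car = p;
  mp->cdr = *mem_pool;   /* push onto the pool */
  *mem_pool = mp;
  return p;
}

static void pool_free(void *p, pair **mem_pool)
{
  pair *mp = *mem_pool;
  assert(p == mp->car);  /* LIFO order, like the "bad GMP memory free" check */
  *mem_pool = mp->cdr;   /* pop */
  free(p);
  free(mp);
}

int main(void)
{
  pair *pool = NULL;
  void *a = pool_malloc(16, &pool);
  void *b = pool_malloc(32, &pool);
  pool_free(b, &pool);   /* must free b before a */
  pool_free(a, &pool);
  return (pool == NULL) ? 0 : 1;
}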