ARM JIT: make calls & returns visible for branch prediction
This improvement makes `inflate` about 5% faster on one test platform (with no change on a much older one).
This commit is contained in:
parent
fb80ac7385
commit
41477ab7eb
|
@ -869,12 +869,13 @@ void scheme_jit_prolog_again(mz_jit_state *jitter, int n, int ret_addr_reg)
|
|||
# define mz_patch_branch_at(a, v) jit_patch_at(a, v)
|
||||
# define mz_patch_ucbranch_at(a, v) jit_patch_at(a, v)
|
||||
# define mz_prolog(x) (mz_set_local_p(JIT_LR, JIT_LOCAL2))
|
||||
# define mz_epilog(x) (mz_get_local_p(x, JIT_LOCAL2), jit_jmpr(x))
|
||||
# define mz_epilog(x) (mz_get_local_p(JIT_LR, JIT_LOCAL2), jit_jmpr(JIT_LR))
|
||||
# define mz_epilog_without_jmp() /* empty */
|
||||
# define jit_shuffle_saved_regs() /* empty */
|
||||
# define jit_unshuffle_saved_regs() /* empty */
|
||||
# define mz_push_locals() /* empty */
|
||||
# define mz_pop_locals() /* empty */
|
||||
# define jit_base_prolog() jit_prolog(0)
|
||||
# ifdef SUPPRESS_LIGHTNING_FUNCS
|
||||
void scheme_jit_prolog_again(mz_jit_state *jitter, int n, int ret_addr_reg);
|
||||
# else
|
||||
|
|
|
@ -785,7 +785,10 @@ int scheme_generate_non_tail_call(mz_jit_state *jitter, int num_rands, int direc
|
|||
/* Fast inlined-native jump ok (proc will check argc, if necessary) */
|
||||
{
|
||||
GC_CAN_IGNORE jit_insn *refr;
|
||||
#ifdef MZ_USE_JIT_I386
|
||||
#if defined(MZ_USE_JIT_I386) || defined(MZ_USE_JIT_ARM)
|
||||
# define KEEP_CALL_AND_RETURN_PAIRED
|
||||
#endif
|
||||
#ifdef KEEP_CALL_AND_RETURN_PAIRED
|
||||
GC_CAN_IGNORE jit_insn *refxr;
|
||||
#endif
|
||||
if (num_rands < 0) {
|
||||
|
@ -798,9 +801,9 @@ int scheme_generate_non_tail_call(mz_jit_state *jitter, int num_rands, int direc
|
|||
jit_movr_p(JIT_R2, JIT_FP); /* save old FP */
|
||||
}
|
||||
jit_shuffle_saved_regs(); /* maybe copies V registers to be restored */
|
||||
#ifdef MZ_USE_JIT_I386
|
||||
/* keep call & ret paired by jumping to where we really
|
||||
want to return,then back here: */
|
||||
#ifdef KEEP_CALL_AND_RETURN_PAIRED
|
||||
/* keep call & ret paired (for branch prediction) by jumping to where
|
||||
we really want to return, then back here: */
|
||||
refr = jit_jmpi(jit_forward());
|
||||
refxr = jit_get_ip();
|
||||
jit_base_prolog();
|
||||
|
@ -858,7 +861,7 @@ int scheme_generate_non_tail_call(mz_jit_state *jitter, int num_rands, int direc
|
|||
/* self-call function pointer is in R1 */
|
||||
jit_jmpr(JIT_R1);
|
||||
}
|
||||
#ifdef MZ_USE_JIT_I386
|
||||
#ifdef KEEP_CALL_AND_RETURN_PAIRED
|
||||
mz_patch_ucbranch(refr);
|
||||
(void)jit_short_calli(refxr);
|
||||
#else
|
||||
|
|
|
@ -2399,6 +2399,27 @@ arm_calli(jit_state_t _jitp, void *i0)
|
|||
return (l);
|
||||
}
|
||||
|
||||
/* Emit a short, direct (PC-relative) BL call to `i0`, as opposed to the
   longer register-indirect sequence of arm_calli.  Used by the
   KEEP_CALL_AND_RETURN_PAIRED path (see jit_short_calli in
   scheme_generate_non_tail_call) so that calls and returns stay paired
   for the CPU's return-address branch predictor.
   Returns the address of the emitted instruction.
   NOTE(review): there is no range check on the displacement here --
   presumably the caller guarantees the target lies within BL's
   immediate range; confirm at call sites.
   (The stray "|" / "||||" lines below are diff-rendering artifacts of
   this scraped page, left untouched.) */
#define jit_short_calli(i0) arm_short_calli(_jitp, i0)
|
||||
__jit_inline jit_insn *
|
||||
arm_short_calli(jit_state_t _jitp, void *i0)
|
||||
{
|
||||
jit_insn *l; /* address of the emitted BL; returned to the caller */
|
||||
long d; /* PC-relative displacement, in instruction-size units */
|
||||
|
||||
if (jit_thumb_p()) { /* Thumb/Thumb-2 encoding mode */
|
||||
jit_assert((long)i0 & 0x1); /* Thumb target addresses must have bit 0 set */
|
||||
l = _jitp->x.pc+1; /* label keeps the Thumb bit-0 marker -- TODO confirm */
|
||||
d = (((long)i0 - (long)l) >> 1) - 2; /* halfword units; -2 presumably PC read-ahead bias */
|
||||
T2_BLI(encode_thumb_jump(d)); /* emit Thumb-2 BL with encoded immediate */
|
||||
} else { /* classic ARM (A32) encoding mode */
|
||||
l = _jitp->x.pc; /* label is the BL instruction itself */
|
||||
d = (((long)i0 - (long)l) >> 2) - 2; /* word units; -2 for ARM PC being two words ahead */
|
||||
_BLI(d & 0x00ffffff); /* BL's immediate is a 24-bit field */
|
||||
}
|
||||
|
||||
return (l);
|
||||
}
|
||||
|
||||
#define jit_prepare_i(i0) arm_prepare_i(_jitp, i0)
|
||||
__jit_inline void
|
||||
arm_prepare_i(jit_state_t _jitp, int i0)
|
||||
|
|
Loading…
Reference in New Issue
Block a user