|
|
@@ -1,6 +1,6 @@
|
|
|
/*
|
|
|
* Single file autogenerated distributable for Duktape 1.2.99.
|
|
|
- * Git commit de0a9aa6126e92054d0f36393371e26562f8b9ec (v1.2.0-373-gde0a9aa-dirty).
|
|
|
+ * Git commit 50171d671af34f2c403acf61c6dc83f2d2561e24 (v1.2.0-398-g50171d6).
|
|
|
*
|
|
|
* See Duktape AUTHORS.rst and LICENSE.txt for copyright and
|
|
|
* licensing information.
|
|
|
@@ -8949,12 +8949,12 @@ struct duk_hthread {
|
|
|
/* Shared object part */
|
|
|
duk_hobject obj;
|
|
|
|
|
|
- /* Next opcode to execute. Conceptually this matches curr_pc of the
|
|
|
- * topmost activation, but having it here is important for opcode
|
|
|
- * dispatch performance. The downside is that whenever the activation
|
|
|
- * changes this field and activation curr_pc must be carefully managed.
|
|
|
+ /* Pointer to bytecode executor's 'curr_pc' variable. Used to copy
|
|
|
+ * the current PC back into the topmost activation when activation
|
|
|
+ * state is about to change (or "syncing" is otherwise needed). This
|
|
|
+ * is rather awkward but important for performance, see execution.rst.
|
|
|
*/
|
|
|
- duk_instr_t *curr_pc;
|
|
|
+ duk_instr_t **ptr_curr_pc;
|
|
|
|
|
|
/* backpointers */
|
|
|
duk_heap *heap;
|
|
|
@@ -45596,6 +45596,7 @@ DUK_INTERNAL duk_hthread *duk_hthread_alloc(duk_heap *heap, duk_uint_t hobject_f
|
|
|
duk__init_object_parts(heap, &res->obj, hobject_flags);
|
|
|
|
|
|
#ifdef DUK_USE_EXPLICIT_NULL_INIT
|
|
|
+ res->ptr_curr_pc = NULL;
|
|
|
res->heap = NULL;
|
|
|
res->valstack = NULL;
|
|
|
res->valstack_end = NULL;
|
|
|
@@ -53475,20 +53476,17 @@ DUK_INTERNAL duk_uint_fast32_t duk_hthread_get_act_prev_pc(duk_hthread *thr, duk
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-/* Write thr->curr_pc back to topmost activation (if any). */
|
|
|
+/* Write bytecode executor's curr_pc back to topmost activation (if any). */
|
|
|
DUK_INTERNAL void duk_hthread_sync_currpc(duk_hthread *thr) {
|
|
|
duk_activation *act;
|
|
|
|
|
|
DUK_ASSERT(thr != NULL);
|
|
|
|
|
|
- if (thr->callstack_top > 0) {
|
|
|
- /* For native calls the assignment is OK as long as thr->curr_pc
|
|
|
- * is NULL for the duration of a native call.
|
|
|
- */
|
|
|
+ if (thr->ptr_curr_pc != NULL) {
|
|
|
+ /* ptr_curr_pc != NULL only when bytecode executor is active. */
|
|
|
+ DUK_ASSERT(thr->callstack_top > 0);
|
|
|
act = thr->callstack + thr->callstack_top - 1;
|
|
|
- DUK_ASSERT(((act->func == NULL || DUK_HOBJECT_HAS_COMPILEDFUNCTION(act->func)) && thr->curr_pc != NULL) || \
|
|
|
- ((act->func != NULL && DUK_HOBJECT_HAS_NATIVEFUNCTION(act->func)) && thr->curr_pc == NULL));
|
|
|
- act->curr_pc = thr->curr_pc;
|
|
|
+ act->curr_pc = *thr->ptr_curr_pc;
|
|
|
}
|
|
|
}
|
|
|
#line 1 "duk_hthread_stacks.c"
|
|
|
@@ -54759,11 +54757,11 @@ void duk__adjust_valstack_and_top(duk_hthread *thr, duk_idx_t num_stack_args, du
|
|
|
* for errhandler calls
|
|
|
* DUK_CALL_FLAG_CONSTRUCTOR_CALL <--> for 'new Foo()' calls
|
|
|
*
|
|
|
- * Input stack:
|
|
|
+ * Input stack (thr):
|
|
|
*
|
|
|
* [ func this arg1 ... argN ]
|
|
|
*
|
|
|
- * Output stack:
|
|
|
+ * Output stack (thr):
|
|
|
*
|
|
|
* [ retval ] (DUK_EXEC_SUCCESS)
|
|
|
* [ errobj ] (DUK_EXEC_ERROR (normal error), protected call)
|
|
|
@@ -54803,7 +54801,7 @@ duk_int_t duk_handle_call(duk_hthread *thr,
|
|
|
duk_int_t entry_call_recursion_depth;
|
|
|
duk_hthread *entry_curr_thread;
|
|
|
duk_uint_fast8_t entry_thread_state;
|
|
|
- duk_instr_t *entry_thr_curr_pc;
|
|
|
+ duk_instr_t **entry_ptr_curr_pc;
|
|
|
volatile duk_bool_t need_setjmp;
|
|
|
duk_jmpbuf * volatile old_jmpbuf_ptr = NULL; /* ptr is volatile (not the target) */
|
|
|
duk_idx_t idx_func; /* valstack index of 'func' and retval (relative to entry valstack_bottom) */
|
|
|
@@ -54845,10 +54843,8 @@ duk_int_t duk_handle_call(duk_hthread *thr,
|
|
|
entry_call_recursion_depth = thr->heap->call_recursion_depth;
|
|
|
entry_curr_thread = thr->heap->curr_thread; /* Note: may be NULL if first call */
|
|
|
entry_thread_state = thr->state;
|
|
|
- entry_thr_curr_pc = NULL; /* not actually needed, avoid warning */
|
|
|
- if (entry_curr_thread) {
|
|
|
- entry_thr_curr_pc = entry_curr_thread->curr_pc;
|
|
|
- }
|
|
|
+ entry_ptr_curr_pc = thr->ptr_curr_pc; /* may be NULL */
|
|
|
+
|
|
|
idx_func = duk_normalize_index(ctx, -num_stack_args - 2); /* idx_func must be valid, note: non-throwing! */
|
|
|
idx_args = idx_func + 2; /* idx_args is not necessarily valid if num_stack_args == 0 (idx_args then equals top) */
|
|
|
|
|
|
@@ -54977,6 +54973,9 @@ duk_int_t duk_handle_call(duk_hthread *thr,
|
|
|
|
|
|
DUK_DDD(DUK_DDDPRINT("call is not protected -> clean up and rethrow"));
|
|
|
|
|
|
+ /* Restore entry thread executor curr_pc stack frame pointer. */
|
|
|
+ thr->ptr_curr_pc = entry_ptr_curr_pc;
|
|
|
+
|
|
|
DUK_HEAP_SWITCH_THREAD(thr->heap, entry_curr_thread); /* may be NULL */
|
|
|
thr->state = entry_thread_state;
|
|
|
DUK_ASSERT((thr->state == DUK_HTHREAD_STATE_INACTIVE && thr->heap->curr_thread == NULL) || /* first call */
|
|
|
@@ -55341,7 +55340,8 @@ duk_int_t duk_handle_call(duk_hthread *thr,
|
|
|
* other invalid
|
|
|
*/
|
|
|
|
|
|
- thr->curr_pc = NULL;
|
|
|
+ /* For native calls, ptr_curr_pc must be NULL so curr_pc is not synced back */
|
|
|
+ thr->ptr_curr_pc = NULL;
|
|
|
|
|
|
if (func) {
|
|
|
rc = ((duk_hnativefunction *) func)->func((duk_context *) thr);
|
|
|
@@ -55454,6 +55454,7 @@ duk_int_t duk_handle_call(duk_hthread *thr,
|
|
|
*
|
|
|
*/
|
|
|
|
|
|
+ /* thr->ptr_curr_pc is set by bytecode executor early on entry */
|
|
|
DUK_DDD(DUK_DDDPRINT("entering bytecode execution"));
|
|
|
duk_js_execute_bytecode(thr);
|
|
|
DUK_DDD(DUK_DDDPRINT("returned from bytecode execution"));
|
|
|
@@ -55553,12 +55554,8 @@ duk_int_t duk_handle_call(duk_hthread *thr,
|
|
|
DUK_DDD(DUK_DDDPRINT("setjmp catchpoint torn down"));
|
|
|
}
|
|
|
|
|
|
- /* Restore entry thread curr_pc (could just reset from topmost
|
|
|
- * activation too).
|
|
|
- */
|
|
|
- if (entry_curr_thread) {
|
|
|
- entry_curr_thread->curr_pc = entry_thr_curr_pc;
|
|
|
- }
|
|
|
+ /* Restore entry thread executor curr_pc stack frame pointer. */
|
|
|
+ thr->ptr_curr_pc = entry_ptr_curr_pc;
|
|
|
|
|
|
DUK_HEAP_SWITCH_THREAD(thr->heap, entry_curr_thread); /* may be NULL */
|
|
|
thr->state = (duk_uint8_t) entry_thread_state;
|
|
|
@@ -55692,6 +55689,7 @@ duk_int_t duk_handle_safe_call(duk_hthread *thr,
|
|
|
duk_int_t entry_call_recursion_depth;
|
|
|
duk_hthread *entry_curr_thread;
|
|
|
duk_uint_fast8_t entry_thread_state;
|
|
|
+ duk_instr_t **entry_ptr_curr_pc;
|
|
|
duk_jmpbuf *old_jmpbuf_ptr = NULL;
|
|
|
duk_jmpbuf our_jmpbuf;
|
|
|
duk_tval tv_tmp;
|
|
|
@@ -55709,6 +55707,7 @@ duk_int_t duk_handle_safe_call(duk_hthread *thr,
|
|
|
entry_call_recursion_depth = thr->heap->call_recursion_depth;
|
|
|
entry_curr_thread = thr->heap->curr_thread; /* Note: may be NULL if first call */
|
|
|
entry_thread_state = thr->state;
|
|
|
+ entry_ptr_curr_pc = thr->ptr_curr_pc; /* may be NULL */
|
|
|
idx_retbase = duk_get_top(ctx) - num_stack_args; /* Note: not a valid stack index if num_stack_args == 0 */
|
|
|
|
|
|
/* Note: cannot portably debug print a function pointer, hence 'func' not printed! */
|
|
|
@@ -55928,6 +55927,9 @@ duk_int_t duk_handle_safe_call(duk_hthread *thr,
|
|
|
|
|
|
DUK_DDD(DUK_DDDPRINT("setjmp catchpoint torn down"));
|
|
|
|
|
|
+ /* Restore entry thread executor curr_pc stack frame pointer. */
|
|
|
+ thr->ptr_curr_pc = entry_ptr_curr_pc;
|
|
|
+
|
|
|
/* XXX: because we unwind stacks above, thr->heap->curr_thread is at
|
|
|
* risk of pointing to an already freed thread. This was indeed the
|
|
|
* case in test-bug-multithread-valgrind.c, until duk_handle_call()
|
|
|
@@ -66053,10 +66055,10 @@ DUK_LOCAL void duk__executor_handle_debugger(duk_hthread *thr, duk_activation *a
|
|
|
*/
|
|
|
|
|
|
#define DUK__STRICT() (DUK_HOBJECT_HAS_STRICT(&(fun)->obj))
|
|
|
-#define DUK__REG(x) (thr->valstack_bottom[(x)])
|
|
|
-#define DUK__REGP(x) (&thr->valstack_bottom[(x)])
|
|
|
-#define DUK__CONST(x) (DUK_HCOMPILEDFUNCTION_GET_CONSTS_BASE(thr->heap, fun)[(x)])
|
|
|
-#define DUK__CONSTP(x) (&DUK_HCOMPILEDFUNCTION_GET_CONSTS_BASE(thr->heap, fun)[(x)])
|
|
|
+#define DUK__REG(x) (*(thr->valstack_bottom + (x)))
|
|
|
+#define DUK__REGP(x) (thr->valstack_bottom + (x))
|
|
|
+#define DUK__CONST(x) (*(consts + (x)))
|
|
|
+#define DUK__CONSTP(x) (consts + (x))
|
|
|
#define DUK__REGCONST(x) ((x) < DUK_BC_REGLIMIT ? DUK__REG((x)) : DUK__CONST((x) - DUK_BC_REGLIMIT))
|
|
|
#define DUK__REGCONSTP(x) ((x) < DUK_BC_REGLIMIT ? DUK__REGP((x)) : DUK__CONSTP((x) - DUK_BC_REGLIMIT))
|
|
|
|
|
|
@@ -66080,10 +66082,17 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
volatile duk_int_t entry_call_recursion_depth;
|
|
|
duk_jmpbuf * volatile entry_jmpbuf_ptr;
|
|
|
|
|
|
+ /* Current PC: accessed by other functions through
|
|
|
+ * thr->ptr_curr_pc. Critical for performance. It would
|
|
|
+ * be safest to make this volatile, but that eliminates
|
|
|
+ * performance benefits; aliasing guarantees should be enough though.
|
|
|
+ */
|
|
|
+ duk_instr_t *curr_pc; /* stable */
|
|
|
+
|
|
|
/* "hot" variables for interpretation -- not volatile, value not guaranteed in setjmp error handling */
|
|
|
duk_hthread *thr; /* stable */
|
|
|
duk_hcompiledfunction *fun; /* stable */
|
|
|
- /* 'consts' is computed on-the-fly */
|
|
|
+ duk_tval *consts; /* stable */
|
|
|
/* 'funcs' is quite rarely used, so no local for it */
|
|
|
|
|
|
/* "hot" temps for interpretation -- not volatile, value not guaranteed in setjmp error handling */
|
|
|
@@ -66222,8 +66231,8 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
* memory anyway.
|
|
|
*
|
|
|
* Any 'goto restart_execution;' code path in opcode dispatch must
|
|
|
- * ensure thr->curr_pc is synced back to act->curr_pc before the
|
|
|
- * goto takes place.
|
|
|
+ * ensure 'curr_pc' is synced back to act->curr_pc before the goto
|
|
|
+ * takes place.
|
|
|
*/
|
|
|
|
|
|
restart_execution:
|
|
|
@@ -66238,6 +66247,8 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
DUK_ASSERT(DUK_ACT_GET_FUNC(thr->callstack + thr->callstack_top - 1) != NULL);
|
|
|
DUK_ASSERT(DUK_HOBJECT_IS_COMPILEDFUNCTION(DUK_ACT_GET_FUNC(thr->callstack + thr->callstack_top - 1)));
|
|
|
|
|
|
+ thr->ptr_curr_pc = &curr_pc;
|
|
|
+
|
|
|
/* Assume interrupt init/counter are properly initialized here. */
|
|
|
|
|
|
/* assume that thr->valstack_bottom has been set-up before getting here */
|
|
|
@@ -66248,6 +66259,8 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
fun = (duk_hcompiledfunction *) DUK_ACT_GET_FUNC(act);
|
|
|
DUK_ASSERT(fun != NULL);
|
|
|
DUK_ASSERT(thr->valstack_top - thr->valstack_bottom == fun->nregs);
|
|
|
+ consts = DUK_HCOMPILEDFUNCTION_GET_CONSTS_BASE(thr->heap, fun);
|
|
|
+ DUK_ASSERT(consts != NULL);
|
|
|
}
|
|
|
|
|
|
#if defined(DUK_USE_DEBUGGER_SUPPORT)
|
|
|
@@ -66312,11 +66325,11 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
valstack_top_base = (duk_size_t) (thr->valstack_top - thr->valstack);
|
|
|
#endif
|
|
|
|
|
|
- /* Set up thr->curr_pc for opcode dispatch. */
|
|
|
+ /* Set up curr_pc for opcode dispatch. */
|
|
|
{
|
|
|
duk_activation *act;
|
|
|
act = thr->callstack + thr->callstack_top - 1;
|
|
|
- thr->curr_pc = act->curr_pc;
|
|
|
+ curr_pc = act->curr_pc;
|
|
|
}
|
|
|
|
|
|
for (;;) {
|
|
|
@@ -66343,12 +66356,12 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
{
|
|
|
duk_activation *act;
|
|
|
act = thr->callstack + thr->callstack_top - 1;
|
|
|
- act->curr_pc = thr->curr_pc;
|
|
|
+ act->curr_pc = (duk_instr_t *) curr_pc;
|
|
|
}
|
|
|
|
|
|
exec_int_ret = duk__executor_interrupt(thr);
|
|
|
if (exec_int_ret == DUK__INT_RESTART) {
|
|
|
- /* thr->curr_pc synced back above */
|
|
|
+ /* curr_pc synced back above */
|
|
|
goto restart_execution;
|
|
|
}
|
|
|
}
|
|
|
@@ -66361,18 +66374,18 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
{
|
|
|
duk_activation *act;
|
|
|
act = thr->callstack + thr->callstack_top - 1;
|
|
|
- DUK_ASSERT(thr->curr_pc >= DUK_HCOMPILEDFUNCTION_GET_CODE_BASE(thr->heap, fun));
|
|
|
- DUK_ASSERT(thr->curr_pc < DUK_HCOMPILEDFUNCTION_GET_CODE_END(thr->heap, fun));
|
|
|
+ DUK_ASSERT(curr_pc >= DUK_HCOMPILEDFUNCTION_GET_CODE_BASE(thr->heap, fun));
|
|
|
+ DUK_ASSERT(curr_pc < DUK_HCOMPILEDFUNCTION_GET_CODE_END(thr->heap, fun));
|
|
|
DUK_UNREF(act); /* if debugging disabled */
|
|
|
|
|
|
DUK_DDD(DUK_DDDPRINT("executing bytecode: pc=%ld, ins=0x%08lx, op=%ld, valstack_top=%ld/%ld, nregs=%ld --> %!I",
|
|
|
- (long) (thr->curr_pc - DUK_HCOMPILEDFUNCTION_GET_CODE_BASE(thr->heap, fun)),
|
|
|
- (unsigned long) *thr->curr_pc,
|
|
|
- (long) DUK_DEC_OP(*thr->curr_pc),
|
|
|
+ (long) (curr_pc - DUK_HCOMPILEDFUNCTION_GET_CODE_BASE(thr->heap, fun)),
|
|
|
+ (unsigned long) *curr_pc,
|
|
|
+ (long) DUK_DEC_OP(*curr_pc),
|
|
|
(long) (thr->valstack_top - thr->valstack),
|
|
|
(long) (thr->valstack_end - thr->valstack),
|
|
|
(long) (fun ? fun->nregs : -1),
|
|
|
- (duk_instr_t) *thr->curr_pc));
|
|
|
+ (duk_instr_t) *curr_pc));
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
@@ -66392,7 +66405,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
- ins = *thr->curr_pc++;
|
|
|
+ ins = *curr_pc++;
|
|
|
|
|
|
/* Typing: use duk_small_(u)int_fast_t when decoding small
|
|
|
* opcode fields (op, A, B, C) and duk_(u)int_fast_t when
|
|
|
@@ -66656,7 +66669,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
* even if the constructor is an Ecmascript function.
|
|
|
*/
|
|
|
|
|
|
- /* Don't need to sync thr->curr_pc here; duk_new() will do that
|
|
|
+ /* Don't need to sync curr_pc here; duk_new() will do that
|
|
|
* when it augments the created error.
|
|
|
*/
|
|
|
|
|
|
@@ -67274,7 +67287,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
tmp = duk_js_toboolean(DUK__REGCONSTP(b));
|
|
|
if (tmp == (duk_bool_t) a) {
|
|
|
/* if boolean matches A, skip next inst */
|
|
|
- thr->curr_pc++;
|
|
|
+ curr_pc++;
|
|
|
} else {
|
|
|
;
|
|
|
}
|
|
|
@@ -67284,7 +67297,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
case DUK_OP_JUMP: {
|
|
|
duk_int_fast_t abc = DUK_DEC_ABC(ins);
|
|
|
|
|
|
- thr->curr_pc += abc - DUK_BC_JUMP_BIAS;
|
|
|
+ curr_pc += abc - DUK_BC_JUMP_BIAS;
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
@@ -67435,7 +67448,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
* Avoid C recursion by being clever.
|
|
|
*/
|
|
|
DUK_DDD(DUK_DDDPRINT("ecma-to-ecma call setup possible, restart execution"));
|
|
|
- /* thr->curr_pc synced by duk_handle_ecma_call_setup() */
|
|
|
+ /* curr_pc synced by duk_handle_ecma_call_setup() */
|
|
|
goto restart_execution;
|
|
|
}
|
|
|
|
|
|
@@ -67515,7 +67528,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
/* When debugger is enabled, we need to recheck the activation
|
|
|
* status after returning.
|
|
|
*/
|
|
|
- /* call handling has synced thr->curr_pc */
|
|
|
+ /* call handling has synced curr_pc */
|
|
|
goto restart_execution;
|
|
|
#endif
|
|
|
break;
|
|
|
@@ -67664,7 +67677,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
|
|
|
cat = thr->catchstack + thr->catchstack_top - 1; /* relookup (side effects) */
|
|
|
cat->callstack_index = thr->callstack_top - 1;
|
|
|
- cat->pc_base = thr->curr_pc; /* pre-incremented, points to first jump slot */
|
|
|
+ cat->pc_base = (duk_instr_t *) curr_pc; /* pre-incremented, points to first jump slot */
|
|
|
cat->idx_base = (duk_size_t) (thr->valstack_bottom - thr->valstack) + bc;
|
|
|
|
|
|
DUK_DDD(DUK_DDDPRINT("TRYCATCH catcher: flags=0x%08lx, callstack_index=%ld, pc_base=%ld, "
|
|
|
@@ -67672,7 +67685,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
(unsigned long) cat->flags, (long) cat->callstack_index,
|
|
|
(long) cat->pc_base, (long) cat->idx_base, (duk_heaphdr *) cat->h_varname));
|
|
|
|
|
|
- thr->curr_pc += 2; /* skip jump slots */
|
|
|
+ curr_pc += 2; /* skip jump slots */
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
@@ -68072,7 +68085,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
/* [ ... enum ] -> [ ... next_key ] */
|
|
|
DUK_DDD(DUK_DDDPRINT("enum active, next key is %!T, skip jump slot ",
|
|
|
(duk_tval *) duk_get_tval(ctx, -1)));
|
|
|
- thr->curr_pc++;
|
|
|
+ curr_pc++;
|
|
|
} else {
|
|
|
/* [ ... enum ] -> [ ... ] */
|
|
|
DUK_DDD(DUK_DDDPRINT("enum finished, execute jump slot"));
|
|
|
@@ -68192,7 +68205,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
/* no need to unwind callstack */
|
|
|
}
|
|
|
|
|
|
- thr->curr_pc = cat->pc_base + 1;
|
|
|
+ curr_pc = cat->pc_base + 1;
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
@@ -68251,7 +68264,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
/* no need to unwind callstack */
|
|
|
}
|
|
|
|
|
|
- thr->curr_pc = cat->pc_base + 1;
|
|
|
+ curr_pc = cat->pc_base + 1;
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
@@ -68464,7 +68477,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
|
|
|
cat->flags = DUK_CAT_TYPE_LABEL | (bc << DUK_CAT_LABEL_SHIFT);
|
|
|
cat->callstack_index = thr->callstack_top - 1;
|
|
|
- cat->pc_base = thr->curr_pc; /* pre-incremented, points to first jump slot */
|
|
|
+ cat->pc_base = (duk_instr_t *) curr_pc; /* pre-incremented, points to first jump slot */
|
|
|
cat->idx_base = 0; /* unused for label */
|
|
|
cat->h_varname = NULL;
|
|
|
|
|
|
@@ -68473,7 +68486,7 @@ DUK_INTERNAL void duk_js_execute_bytecode(duk_hthread *exec_thr) {
|
|
|
(long) cat->flags, (long) cat->callstack_index, (long) cat->pc_base,
|
|
|
(long) cat->idx_base, (duk_heaphdr *) cat->h_varname, (long) DUK_CAT_GET_LABEL(cat)));
|
|
|
|
|
|
- thr->curr_pc += 2; /* skip jump slots */
|
|
|
+ curr_pc += 2; /* skip jump slots */
|
|
|
break;
|
|
|
}
|
|
|
|